In [1]:
#!pip install mediapipe opencv-python pandas scikit-learn
import mediapipe as mp #Meidapipe
import cv2 #opencv
import csv
import numpy as np
import os
import sys
import tqdm
import random
import pandas as pd
import pickle # to save a model

# Package for visualisation
import matplotlib.pyplot as plt
import seaborn as sns

# Packages for model Implementation and Evaluation
from sklearn.metrics import confusion_matrix,classification_report
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold
from sklearn.model_selection import RandomizedSearchCV
In [2]:
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import SpatialDropout1D
from keras.layers import InputLayer
from keras.layers import Conv1D
from keras.layers import Flatten
from keras.layers import Dropout
from tensorflow.keras.utils import to_categorical
from keras.layers import MaxPooling1D
In [3]:
#Initialising input image path and  output csv path
images_in_folder = 'C:\\Users\\jhaan\\Downloads\\Dataset'
#images_out_folder = 'fitness_poses_images_out_basic'
#csv_out_path = 'yoga_poses_landmark_dataset.csv'

#from mediapipe.solutions import drawing_utils as mp_drawing
mp_drawing = mp.solutions.drawing_utils # Drawing helpers
# from mediapipe.solutions import pose as mp_pose
mp_pose= mp.solutions.pose # Blazepose pose estimation model
In [ ]:
 
In [ ]:
 
In [4]:
import os
import csv
import sys
import cv2
import tqdm
import numpy as np
import mediapipe as mp

# Set the paths for input images and the output CSV file

csv_out_path = 'yoga_poses_landmark_dataset1.csv'
images_out_folder = 'fitness_poses_images_out_basic'

# Iterating through each Folder, converting images into landmark points and saving them to a CSV file
with open(csv_out_path, 'w') as csv_out_file:
    csv_out_writer = csv.writer(csv_out_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)

    # folder names are used as pose class names
    pose_class_names = sorted([n for n in os.listdir(images_in_folder) if not n.startswith('.')])
    counter = 1
    for pose_class_name in pose_class_names:
        print('Extracting landmark points from dataset', pose_class_name, file=sys.stderr)
        if not os.path.exists(os.path.join(images_out_folder, pose_class_name)):
            os.makedirs(os.path.join(images_out_folder, pose_class_name))
        image_names = sorted([
            n for n in os.listdir(os.path.join(images_in_folder, pose_class_name))
            if not n.startswith('.')
        ])
        for image_name in tqdm.tqdm(image_names, position=0):
            # Load image.
            input_frame = cv2.imread(os.path.join(images_in_folder, pose_class_name, image_name))
            input_frame = cv2.cvtColor(input_frame, cv2.COLOR_BGR2RGB)

           # Applying Blazepose model on the image and extracting 33 3D landmark points.
            with mp_pose.Pose() as pose_tracker:
                result = pose_tracker.process(image=input_frame)
                pose_landmarks = result.pose_landmarks

            # Save Landmark points to a csv file.
            if pose_landmarks is not None:
                
                # check the number of landmarks and take pose landmarks.
                assert len(pose_landmarks.landmark) == 33, 'Unexpected number of predicted pose landmarks: {}'.format(len(pose_landmarks.landmark))
                pose_landmark = [[lmk.x, lmk.y, lmk.z] for lmk in pose_landmarks.landmark]

                # Write pose sample to CSV.
                pose_landmarks = np.around(pose_landmark, 5).flatten().astype(str).tolist()
                csv_out_writer.writerow([pose_class_name] + pose_landmarks)
Extracting landmark points from dataset Adho Mukha Svanasana
100%|████████████████████████████████████████████████████████████████████████████████| 320/320 [04:31<00:00,  1.18it/s]
Extracting landmark points from dataset BALASANA
100%|████████████████████████████████████████████████████████████████████████████████| 261/261 [03:25<00:00,  1.27it/s]
Extracting landmark points from dataset UTKATA KONASANA
100%|████████████████████████████████████████████████████████████████████████████████| 180/180 [02:24<00:00,  1.25it/s]
Extracting landmark points from dataset VIRABHADRASANA
100%|████████████████████████████████████████████████████████████████████████████████| 209/209 [07:09<00:00,  2.05s/it]
Extracting landmark points from dataset VRIKSHASANA
100%|████████████████████████████████████████████████████████████████████████████████| 334/334 [04:21<00:00,  1.27it/s]
In [5]:
df_csv = pd.read_csv('./yoga_poses_landmark_dataset1.csv', header=None)
In [6]:
l1=['pose_name']
for i in range(1,34):
    l1.append('x'+str(i))
    l1.append('y'+str(i))
    l1.append('z'+str(i))
In [7]:
df_csv.to_csv('./yoga_poses_landmark_dataset_new11.csv', header=l1, index=False)
In [8]:
df_csv1 = pd.read_csv('./yoga_poses_landmark_dataset_new11.csv')
df_csv1
Out[8]:
pose_name x1 y1 z1 x2 y2 z2 x3 y3 z3 ... z30 x31 y31 z31 x32 y32 z32 x33 y33 z33
0 Adho Mukha Svanasana 0.51255 0.72271 -0.06939 0.49571 0.74738 -0.10888 0.49218 0.74686 -0.10889 ... -0.09030 0.87879 0.88034 0.34875 0.78911 0.92161 -0.25806 0.77612 0.90703 0.24612
1 Adho Mukha Svanasana 0.56914 0.78736 -0.04211 0.58289 0.79472 -0.02245 0.58382 0.79329 -0.02254 ... 0.16459 0.14689 0.87059 -0.14172 0.24892 0.90370 0.07002 0.23911 0.91070 -0.27126
2 Adho Mukha Svanasana 0.60981 0.73095 -0.02633 0.62893 0.73618 -0.00001 0.63077 0.73358 -0.00006 ... 0.32132 0.05536 0.88034 -0.16074 0.20473 0.90643 0.19393 0.19505 0.92314 -0.33403
3 Adho Mukha Svanasana 0.54014 0.76076 -0.09753 0.56006 0.77120 -0.07502 0.56291 0.76970 -0.07497 ... 0.39135 0.15186 0.85156 -0.04104 0.27583 0.88195 0.27128 0.26445 0.88739 -0.20759
4 Adho Mukha Svanasana 0.35996 0.70324 -0.03186 0.33947 0.71399 -0.07545 0.33654 0.71106 -0.07544 ... -0.13264 0.85920 0.84807 0.35368 0.75283 0.94305 -0.29861 0.72678 0.92300 0.25760
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
1151 VRIKSHASANA 0.25630 0.24405 0.20495 0.25462 0.23148 0.18330 0.25164 0.22982 0.18328 ... -0.00287 0.24393 0.97923 0.01453 0.16519 0.96654 -0.04811 0.25233 0.96893 -0.02125
1152 VRIKSHASANA 0.54245 0.12668 -0.45318 0.55250 0.11328 -0.40451 0.55850 0.11340 -0.40459 ... 0.28987 0.50132 0.53053 0.55582 0.50007 0.88824 0.01557 0.52138 0.62121 0.51723
1153 VRIKSHASANA 0.46805 0.24539 -0.64011 0.48047 0.22870 -0.57668 0.48989 0.22795 -0.57661 ... 0.24510 0.49847 0.87207 0.26612 0.45072 0.65527 0.12706 0.46229 0.90014 0.00781
1154 VRIKSHASANA 0.50272 0.22139 -0.18033 0.50743 0.20640 -0.14932 0.51033 0.20665 -0.14930 ... 0.26938 0.51896 0.93901 0.12276 0.49651 0.69156 0.23207 0.51610 0.96354 0.00034
1155 VRIKSHASANA 0.42180 0.36387 -0.78332 0.43354 0.34638 -0.71442 0.44216 0.34566 -0.71427 ... 0.29859 0.39522 0.95606 0.56444 0.42328 0.96545 0.19362 0.39969 1.02506 0.40572

1156 rows × 100 columns

In [9]:
import csv
import random

# Set the path for your CSV file
csv_file_path = './yoga_poses_landmark_dataset_new11.csv'

# Read the CSV file into a list
with open(csv_file_path, 'r') as csv_file:
    csv_reader = csv.reader(csv_file)
    rows = list(csv_reader)

    
# Separate the header (first row) and the data rows
header = rows[0]
data = rows[1:]

# Shuffle the rows randomly
random.shuffle(data)

# Write the shuffled rows back to the CSV file, including the header
with open(csv_file_path, 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(header)  # Write the header
    csv_writer.writerows(data)  # Write the shuffled data rows

print("CSV file rows shuffled (excluding header) successfully.")
CSV file rows shuffled (excluding header) successfully.
In [7]:
df = pd.read_csv('./yoga_poses_landmark_dataset_new11.csv')
df
Out[7]:
pose_name x1 y1 z1 x2 y2 z2 x3 y3 z3 ... z30 x31 y31 z31 x32 y32 z32 x33 y33 z33
0 VRIKSHASANA 0.45108 0.22382 -0.38753 0.45272 0.21546 -0.32575 0.45740 0.21562 -0.32567 ... 0.27237 0.46001 0.88263 0.03023 0.37165 0.64216 0.18925 0.44391 0.92220 -0.22734
1 Adho Mukha Svanasana 0.38190 0.61599 -0.03467 0.36080 0.62489 -0.07199 0.35736 0.62277 -0.07202 ... -0.00884 0.86785 0.68003 0.33441 0.80277 0.73926 -0.10481 0.78310 0.72003 0.26865
2 UTKATA KONASANA 0.51682 0.11190 -0.27063 0.52912 0.09247 -0.22790 0.53516 0.09281 -0.22786 ... 0.07232 0.27026 0.89756 0.01342 0.84838 0.90763 -0.05888 0.16281 0.90999 -0.10910
3 BALASANA 0.38900 0.68686 -0.14106 0.37749 0.67608 -0.16306 0.37862 0.67272 -0.16308 ... 0.10843 0.73661 0.71348 0.45402 0.79683 0.73861 0.18529 0.79332 0.74329 0.51004
4 VRIKSHASANA 0.51221 0.25020 -0.63206 0.51780 0.23750 -0.57801 0.52381 0.23698 -0.57799 ... 0.29490 0.50963 0.86296 0.18386 0.50635 0.71410 0.16342 0.47599 0.89370 -0.08674
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
1151 VRIKSHASANA 0.50029 0.23499 -0.35094 0.50675 0.21774 -0.32488 0.51091 0.21660 -0.32494 ... 0.18347 0.48110 0.52805 0.30461 0.54291 0.85271 0.04053 0.49234 0.57942 0.28286
1152 UTKATA KONASANA 0.48911 0.23338 -0.36021 0.49504 0.21518 -0.32105 0.49918 0.21460 -0.32104 ... 0.11543 0.33239 0.86518 0.09871 0.71403 0.88187 -0.00813 0.26140 0.88380 -0.01904
1153 Adho Mukha Svanasana 0.61617 0.62816 -0.07147 0.64057 0.63058 -0.07773 0.64352 0.62633 -0.07773 ... 0.93123 0.05115 0.73228 0.48306 0.21489 0.72165 0.85823 0.17197 0.75434 0.37827
1154 UTKATA KONASANA 0.52691 0.16386 -0.36377 0.54309 0.14916 -0.35017 0.55020 0.15020 -0.35020 ... -0.19776 0.45360 0.80157 0.34222 0.50379 0.94044 -0.39184 0.38278 0.82205 0.24056
1155 BALASANA 0.52860 0.86613 -0.22359 0.50409 0.85756 -0.26074 0.50348 0.85489 -0.26078 ... 0.03842 0.82441 0.79253 0.21603 0.76356 0.83194 0.16082 0.82202 0.85543 0.28424

1156 rows × 100 columns

In [8]:
import matplotlib.pyplot as plt

# Assuming 'df' contains your dataset and 'pose_name' is the column representing classes
class_counts = df['pose_name'].value_counts()

# Plotting a bar plot for class distribution
plt.figure(figsize=(8, 6))
class_counts.plot(kind='bar', color='skyblue')
plt.title('Class Distribution')
plt.xlabel('Yoga Poses')
plt.ylabel('Number of Instances')
plt.xticks(rotation=45)  # Rotating x-labels for better readability
plt.tight_layout()
plt.show()

Feature Encoding¶

In [9]:
# Encode the response variable into numrical values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
df['label_enc'] = labelencoder.fit_transform(df['pose_name'])
In [10]:
classes= df[['label_enc','pose_name']].drop_duplicates()
classes
Out[10]:
label_enc pose_name
0 4 VRIKSHASANA
1 0 Adho Mukha Svanasana
2 2 UTKATA KONASANA
3 1 BALASANA
8 3 VIRABHADRASANA
In [11]:
# Adding pose names and encode values to a dictionary for display purposes
classes.set_index('label_enc', inplace= True)
yoga_pose=classes.to_dict()
yoga_pose_dict=yoga_pose['pose_name']
yoga_pose_dict
Out[11]:
{4: 'VRIKSHASANA',
 0: 'Adho Mukha Svanasana',
 2: 'UTKATA KONASANA',
 1: 'BALASANA',
 3: 'VIRABHADRASANA'}

Splitting data into train test split - 70-30 ratio¶

In [12]:
X=df.drop(['pose_name','label_enc'], axis=1) # independent variable
y=df['label_enc'] # dependent variable
In [13]:
A=X.columns
In [14]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.3, random_state=1234,stratify=y)

LSTM Model Implementation¶

In [15]:
# reshaping the input array before applying to lstm
y_train_re= to_categorical(y_train).astype(int)
y_test_re= to_categorical(y_test).astype(int)

X_train_lstm= np.array(X_train)
X_test_lstm = np.array(X_test)
X_train_lstm=X_train_lstm.reshape(X_train_lstm.shape[0],1,X_train_lstm.shape[1])
X_test_lstm=X_test_lstm.reshape(X_test_lstm.shape[0],1,X_test_lstm.shape[1])
In [16]:
#LSTM Model Implementation
#tf.set_random_seed(122)
model = Sequential()
model.add(LSTM(64, return_sequences=True, activation='relu', dropout=0.2, input_shape=(1,X_train_lstm.shape[2])))

model.add(LSTM(128, return_sequences=True, dropout=0.2, activation='relu'))

model.add(LSTM(64, return_sequences=False, dropout=0.2, activation='relu'))

model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(5, activation='softmax'))

#Compile
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
#Fitting LSTM model on train data
history_model_lstm=model.fit(X_train_lstm, y_train_re, epochs=200, validation_data=(X_test_lstm,y_test_re))
Epoch 1/200
26/26 [==============================] - 13s 85ms/step - loss: 1.6039 - categorical_accuracy: 0.2522 - val_loss: 1.5887 - val_categorical_accuracy: 0.2536
Epoch 2/200
26/26 [==============================] - 1s 28ms/step - loss: 1.5097 - categorical_accuracy: 0.3127 - val_loss: 1.3511 - val_categorical_accuracy: 0.4553
Epoch 3/200
26/26 [==============================] - 1s 37ms/step - loss: 1.2970 - categorical_accuracy: 0.4697 - val_loss: 1.0920 - val_categorical_accuracy: 0.5072
Epoch 4/200
26/26 [==============================] - 1s 28ms/step - loss: 1.1602 - categorical_accuracy: 0.4648 - val_loss: 1.0430 - val_categorical_accuracy: 0.5101
Epoch 5/200
26/26 [==============================] - 1s 24ms/step - loss: 1.1210 - categorical_accuracy: 0.4685 - val_loss: 1.0283 - val_categorical_accuracy: 0.4870
Epoch 6/200
26/26 [==============================] - 1s 26ms/step - loss: 1.0864 - categorical_accuracy: 0.5019 - val_loss: 1.0028 - val_categorical_accuracy: 0.5159
Epoch 7/200
26/26 [==============================] - 1s 25ms/step - loss: 1.0351 - categorical_accuracy: 0.5093 - val_loss: 0.9425 - val_categorical_accuracy: 0.5764
Epoch 8/200
26/26 [==============================] - 1s 27ms/step - loss: 1.0191 - categorical_accuracy: 0.5328 - val_loss: 0.9388 - val_categorical_accuracy: 0.5389
Epoch 9/200
26/26 [==============================] - 1s 21ms/step - loss: 1.0061 - categorical_accuracy: 0.5451 - val_loss: 0.9195 - val_categorical_accuracy: 0.5735
Epoch 10/200
26/26 [==============================] - 1s 24ms/step - loss: 0.9606 - categorical_accuracy: 0.5414 - val_loss: 0.8871 - val_categorical_accuracy: 0.6138
Epoch 11/200
26/26 [==============================] - 1s 24ms/step - loss: 0.9638 - categorical_accuracy: 0.5513 - val_loss: 0.8919 - val_categorical_accuracy: 0.6052
Epoch 12/200
26/26 [==============================] - 1s 21ms/step - loss: 0.9369 - categorical_accuracy: 0.5711 - val_loss: 0.8512 - val_categorical_accuracy: 0.6369
Epoch 13/200
26/26 [==============================] - 1s 24ms/step - loss: 0.9297 - categorical_accuracy: 0.5834 - val_loss: 0.8298 - val_categorical_accuracy: 0.6455
Epoch 14/200
26/26 [==============================] - 1s 25ms/step - loss: 0.9137 - categorical_accuracy: 0.5735 - val_loss: 0.8201 - val_categorical_accuracy: 0.6340
Epoch 15/200
26/26 [==============================] - 1s 25ms/step - loss: 0.9158 - categorical_accuracy: 0.5773 - val_loss: 0.8290 - val_categorical_accuracy: 0.6311
Epoch 16/200
26/26 [==============================] - 1s 30ms/step - loss: 0.9057 - categorical_accuracy: 0.5847 - val_loss: 0.8088 - val_categorical_accuracy: 0.6455
Epoch 17/200
26/26 [==============================] - 1s 24ms/step - loss: 0.8942 - categorical_accuracy: 0.5871 - val_loss: 0.7881 - val_categorical_accuracy: 0.6427
Epoch 18/200
26/26 [==============================] - 0s 19ms/step - loss: 0.8916 - categorical_accuracy: 0.5921 - val_loss: 0.8118 - val_categorical_accuracy: 0.6340
Epoch 19/200
26/26 [==============================] - 1s 23ms/step - loss: 0.8910 - categorical_accuracy: 0.6032 - val_loss: 0.7970 - val_categorical_accuracy: 0.6369
Epoch 20/200
26/26 [==============================] - 1s 21ms/step - loss: 0.8543 - categorical_accuracy: 0.6131 - val_loss: 0.8291 - val_categorical_accuracy: 0.5850
Epoch 21/200
26/26 [==============================] - 1s 30ms/step - loss: 0.8503 - categorical_accuracy: 0.6020 - val_loss: 0.7743 - val_categorical_accuracy: 0.6282
Epoch 22/200
26/26 [==============================] - 1s 31ms/step - loss: 0.8549 - categorical_accuracy: 0.5933 - val_loss: 0.7426 - val_categorical_accuracy: 0.6628
Epoch 23/200
26/26 [==============================] - 1s 24ms/step - loss: 0.8034 - categorical_accuracy: 0.6477 - val_loss: 0.7316 - val_categorical_accuracy: 0.7147
Epoch 24/200
26/26 [==============================] - 1s 21ms/step - loss: 0.8078 - categorical_accuracy: 0.6465 - val_loss: 0.7045 - val_categorical_accuracy: 0.7378
Epoch 25/200
26/26 [==============================] - 1s 27ms/step - loss: 0.7849 - categorical_accuracy: 0.6922 - val_loss: 0.6706 - val_categorical_accuracy: 0.7896
Epoch 26/200
26/26 [==============================] - 1s 30ms/step - loss: 0.7621 - categorical_accuracy: 0.6922 - val_loss: 0.6479 - val_categorical_accuracy: 0.7839
Epoch 27/200
26/26 [==============================] - 1s 27ms/step - loss: 0.7209 - categorical_accuracy: 0.7108 - val_loss: 0.6232 - val_categorical_accuracy: 0.7983
Epoch 28/200
26/26 [==============================] - 1s 23ms/step - loss: 0.7318 - categorical_accuracy: 0.7132 - val_loss: 0.6353 - val_categorical_accuracy: 0.7781
Epoch 29/200
26/26 [==============================] - 1s 23ms/step - loss: 0.6804 - categorical_accuracy: 0.7293 - val_loss: 0.5870 - val_categorical_accuracy: 0.8127
Epoch 30/200
26/26 [==============================] - 1s 23ms/step - loss: 0.7157 - categorical_accuracy: 0.7095 - val_loss: 0.6070 - val_categorical_accuracy: 0.8329
Epoch 31/200
26/26 [==============================] - 1s 20ms/step - loss: 0.6775 - categorical_accuracy: 0.7268 - val_loss: 0.6165 - val_categorical_accuracy: 0.8156
Epoch 32/200
26/26 [==============================] - 1s 21ms/step - loss: 0.6523 - categorical_accuracy: 0.7528 - val_loss: 0.6275 - val_categorical_accuracy: 0.8184
Epoch 33/200
26/26 [==============================] - 1s 24ms/step - loss: 0.6449 - categorical_accuracy: 0.7577 - val_loss: 0.5766 - val_categorical_accuracy: 0.8415
Epoch 34/200
26/26 [==============================] - 1s 22ms/step - loss: 0.6434 - categorical_accuracy: 0.7577 - val_loss: 0.5806 - val_categorical_accuracy: 0.8444
Epoch 35/200
26/26 [==============================] - 1s 24ms/step - loss: 0.6584 - categorical_accuracy: 0.7491 - val_loss: 0.5601 - val_categorical_accuracy: 0.8646
Epoch 36/200
26/26 [==============================] - 1s 20ms/step - loss: 0.6211 - categorical_accuracy: 0.7553 - val_loss: 0.5464 - val_categorical_accuracy: 0.8559
Epoch 37/200
26/26 [==============================] - 1s 22ms/step - loss: 0.6314 - categorical_accuracy: 0.7689 - val_loss: 0.5408 - val_categorical_accuracy: 0.8271
Epoch 38/200
26/26 [==============================] - 1s 23ms/step - loss: 0.6066 - categorical_accuracy: 0.7639 - val_loss: 0.5199 - val_categorical_accuracy: 0.8386
Epoch 39/200
26/26 [==============================] - 1s 28ms/step - loss: 0.6134 - categorical_accuracy: 0.7614 - val_loss: 0.5114 - val_categorical_accuracy: 0.8530
Epoch 40/200
26/26 [==============================] - 1s 31ms/step - loss: 0.5683 - categorical_accuracy: 0.7936 - val_loss: 0.5258 - val_categorical_accuracy: 0.8473
Epoch 41/200
26/26 [==============================] - 1s 25ms/step - loss: 0.6127 - categorical_accuracy: 0.7750 - val_loss: 0.5730 - val_categorical_accuracy: 0.8646
Epoch 42/200
26/26 [==============================] - 0s 18ms/step - loss: 0.5619 - categorical_accuracy: 0.7985 - val_loss: 0.4802 - val_categorical_accuracy: 0.8530
Epoch 43/200
26/26 [==============================] - 0s 19ms/step - loss: 0.5427 - categorical_accuracy: 0.7948 - val_loss: 0.4898 - val_categorical_accuracy: 0.8588
Epoch 44/200
26/26 [==============================] - 1s 29ms/step - loss: 0.5311 - categorical_accuracy: 0.7998 - val_loss: 0.4959 - val_categorical_accuracy: 0.8588
Epoch 45/200
26/26 [==============================] - 1s 31ms/step - loss: 0.5695 - categorical_accuracy: 0.7936 - val_loss: 0.5674 - val_categorical_accuracy: 0.8530
Epoch 46/200
26/26 [==============================] - 1s 22ms/step - loss: 0.5607 - categorical_accuracy: 0.7960 - val_loss: 0.4893 - val_categorical_accuracy: 0.8674
Epoch 47/200
26/26 [==============================] - 1s 27ms/step - loss: 0.5428 - categorical_accuracy: 0.7936 - val_loss: 0.4904 - val_categorical_accuracy: 0.8703
Epoch 48/200
26/26 [==============================] - 1s 30ms/step - loss: 0.5176 - categorical_accuracy: 0.7960 - val_loss: 0.4997 - val_categorical_accuracy: 0.8732
Epoch 49/200
26/26 [==============================] - 1s 24ms/step - loss: 0.5333 - categorical_accuracy: 0.8047 - val_loss: 0.5092 - val_categorical_accuracy: 0.8674
Epoch 50/200
26/26 [==============================] - 1s 24ms/step - loss: 0.5426 - categorical_accuracy: 0.7960 - val_loss: 0.4570 - val_categorical_accuracy: 0.8732
Epoch 51/200
26/26 [==============================] - 1s 21ms/step - loss: 0.5314 - categorical_accuracy: 0.7998 - val_loss: 0.4288 - val_categorical_accuracy: 0.8818
Epoch 52/200
26/26 [==============================] - 1s 22ms/step - loss: 0.5528 - categorical_accuracy: 0.7911 - val_loss: 0.5036 - val_categorical_accuracy: 0.8732
Epoch 53/200
26/26 [==============================] - 1s 28ms/step - loss: 0.5321 - categorical_accuracy: 0.8035 - val_loss: 0.5161 - val_categorical_accuracy: 0.8847
Epoch 54/200
26/26 [==============================] - 1s 35ms/step - loss: 0.5129 - categorical_accuracy: 0.8096 - val_loss: 0.4819 - val_categorical_accuracy: 0.8646
Epoch 55/200
26/26 [==============================] - 1s 20ms/step - loss: 0.5285 - categorical_accuracy: 0.7985 - val_loss: 0.4820 - val_categorical_accuracy: 0.8703
Epoch 56/200
26/26 [==============================] - 1s 28ms/step - loss: 0.5000 - categorical_accuracy: 0.8109 - val_loss: 0.4532 - val_categorical_accuracy: 0.8818
Epoch 57/200
26/26 [==============================] - 1s 39ms/step - loss: 0.5439 - categorical_accuracy: 0.8022 - val_loss: 0.4705 - val_categorical_accuracy: 0.8761
Epoch 58/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4644 - categorical_accuracy: 0.8393 - val_loss: 0.4814 - val_categorical_accuracy: 0.8732
Epoch 59/200
26/26 [==============================] - 1s 22ms/step - loss: 0.5287 - categorical_accuracy: 0.8109 - val_loss: 0.4613 - val_categorical_accuracy: 0.8530
Epoch 60/200
26/26 [==============================] - 1s 24ms/step - loss: 0.5224 - categorical_accuracy: 0.8047 - val_loss: 0.4653 - val_categorical_accuracy: 0.8732
Epoch 61/200
26/26 [==============================] - 1s 24ms/step - loss: 0.5026 - categorical_accuracy: 0.8121 - val_loss: 0.4680 - val_categorical_accuracy: 0.8732
Epoch 62/200
26/26 [==============================] - 1s 26ms/step - loss: 0.5023 - categorical_accuracy: 0.8121 - val_loss: 0.4688 - val_categorical_accuracy: 0.8790
Epoch 63/200
26/26 [==============================] - 1s 27ms/step - loss: 0.5003 - categorical_accuracy: 0.8158 - val_loss: 0.4763 - val_categorical_accuracy: 0.8646
Epoch 64/200
26/26 [==============================] - 1s 21ms/step - loss: 0.5094 - categorical_accuracy: 0.8171 - val_loss: 0.4719 - val_categorical_accuracy: 0.8876
Epoch 65/200
26/26 [==============================] - 1s 33ms/step - loss: 0.5328 - categorical_accuracy: 0.7985 - val_loss: 0.5462 - val_categorical_accuracy: 0.8617
Epoch 66/200
26/26 [==============================] - 1s 35ms/step - loss: 0.4896 - categorical_accuracy: 0.8245 - val_loss: 0.4326 - val_categorical_accuracy: 0.8790
Epoch 67/200
26/26 [==============================] - 1s 26ms/step - loss: 0.5169 - categorical_accuracy: 0.8121 - val_loss: 0.4245 - val_categorical_accuracy: 0.8790
Epoch 68/200
26/26 [==============================] - 1s 19ms/step - loss: 0.4734 - categorical_accuracy: 0.8393 - val_loss: 0.4389 - val_categorical_accuracy: 0.8732
Epoch 69/200
26/26 [==============================] - 1s 29ms/step - loss: 0.4709 - categorical_accuracy: 0.8356 - val_loss: 0.3998 - val_categorical_accuracy: 0.8847
Epoch 70/200
26/26 [==============================] - 1s 25ms/step - loss: 0.5100 - categorical_accuracy: 0.8022 - val_loss: 0.4241 - val_categorical_accuracy: 0.8790
Epoch 71/200
26/26 [==============================] - 1s 29ms/step - loss: 0.4649 - categorical_accuracy: 0.8183 - val_loss: 0.4130 - val_categorical_accuracy: 0.8703
Epoch 72/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4837 - categorical_accuracy: 0.8294 - val_loss: 0.4813 - val_categorical_accuracy: 0.8703
Epoch 73/200
26/26 [==============================] - 1s 25ms/step - loss: 0.5041 - categorical_accuracy: 0.8171 - val_loss: 0.4842 - val_categorical_accuracy: 0.8646
Epoch 74/200
26/26 [==============================] - 1s 20ms/step - loss: 0.4665 - categorical_accuracy: 0.8232 - val_loss: 0.4654 - val_categorical_accuracy: 0.8732
Epoch 75/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4432 - categorical_accuracy: 0.8307 - val_loss: 0.4391 - val_categorical_accuracy: 0.8790
Epoch 76/200
26/26 [==============================] - 1s 23ms/step - loss: 0.4511 - categorical_accuracy: 0.8368 - val_loss: 0.4161 - val_categorical_accuracy: 0.8761
Epoch 77/200
26/26 [==============================] - 1s 25ms/step - loss: 0.4522 - categorical_accuracy: 0.8344 - val_loss: 0.3912 - val_categorical_accuracy: 0.8761
Epoch 78/200
26/26 [==============================] - 1s 26ms/step - loss: 0.4663 - categorical_accuracy: 0.8072 - val_loss: 0.4393 - val_categorical_accuracy: 0.8703
Epoch 79/200
26/26 [==============================] - 1s 25ms/step - loss: 0.4482 - categorical_accuracy: 0.8319 - val_loss: 0.4456 - val_categorical_accuracy: 0.8674
Epoch 80/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4367 - categorical_accuracy: 0.8307 - val_loss: 0.4432 - val_categorical_accuracy: 0.8646
Epoch 81/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4824 - categorical_accuracy: 0.8245 - val_loss: 0.4471 - val_categorical_accuracy: 0.8847
Epoch 82/200
26/26 [==============================] - 0s 18ms/step - loss: 0.4589 - categorical_accuracy: 0.8356 - val_loss: 0.4145 - val_categorical_accuracy: 0.8761
Epoch 83/200
26/26 [==============================] - 1s 30ms/step - loss: 0.4589 - categorical_accuracy: 0.8232 - val_loss: 0.4208 - val_categorical_accuracy: 0.8847
Epoch 84/200
26/26 [==============================] - 1s 38ms/step - loss: 0.4372 - categorical_accuracy: 0.8393 - val_loss: 0.3922 - val_categorical_accuracy: 0.8905
Epoch 85/200
26/26 [==============================] - 1s 24ms/step - loss: 0.4535 - categorical_accuracy: 0.8443 - val_loss: 0.4022 - val_categorical_accuracy: 0.8732
Epoch 86/200
26/26 [==============================] - 1s 20ms/step - loss: 0.4392 - categorical_accuracy: 0.8455 - val_loss: 0.4244 - val_categorical_accuracy: 0.8790
Epoch 87/200
26/26 [==============================] - 1s 32ms/step - loss: 0.4132 - categorical_accuracy: 0.8554 - val_loss: 0.3939 - val_categorical_accuracy: 0.8674
Epoch 88/200
26/26 [==============================] - 1s 23ms/step - loss: 0.4790 - categorical_accuracy: 0.8257 - val_loss: 0.4368 - val_categorical_accuracy: 0.8818
Epoch 89/200
26/26 [==============================] - 1s 32ms/step - loss: 0.4714 - categorical_accuracy: 0.8208 - val_loss: 0.4137 - val_categorical_accuracy: 0.8876
Epoch 90/200
26/26 [==============================] - 1s 25ms/step - loss: 0.4234 - categorical_accuracy: 0.8368 - val_loss: 0.4083 - val_categorical_accuracy: 0.8761
Epoch 91/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4351 - categorical_accuracy: 0.8307 - val_loss: 0.4389 - val_categorical_accuracy: 0.8847
Epoch 92/200
26/26 [==============================] - 1s 37ms/step - loss: 0.4587 - categorical_accuracy: 0.8282 - val_loss: 0.5017 - val_categorical_accuracy: 0.8761
Epoch 93/200
26/26 [==============================] - 1s 23ms/step - loss: 0.4021 - categorical_accuracy: 0.8430 - val_loss: 0.4723 - val_categorical_accuracy: 0.8732
Epoch 94/200
26/26 [==============================] - 1s 24ms/step - loss: 0.4520 - categorical_accuracy: 0.8245 - val_loss: 0.4624 - val_categorical_accuracy: 0.8790
Epoch 95/200
26/26 [==============================] - 1s 23ms/step - loss: 0.4617 - categorical_accuracy: 0.8344 - val_loss: 0.4661 - val_categorical_accuracy: 0.8732
Epoch 96/200
26/26 [==============================] - 1s 24ms/step - loss: 0.4083 - categorical_accuracy: 0.8480 - val_loss: 0.4779 - val_categorical_accuracy: 0.8905
Epoch 97/200
26/26 [==============================] - 1s 34ms/step - loss: 0.4227 - categorical_accuracy: 0.8640 - val_loss: 0.4635 - val_categorical_accuracy: 0.8876
Epoch 98/200
26/26 [==============================] - 1s 24ms/step - loss: 0.4387 - categorical_accuracy: 0.8356 - val_loss: 0.4415 - val_categorical_accuracy: 0.8876
Epoch 99/200
26/26 [==============================] - 1s 20ms/step - loss: 0.4091 - categorical_accuracy: 0.8443 - val_loss: 0.4253 - val_categorical_accuracy: 0.8847
Epoch 100/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4640 - categorical_accuracy: 0.8294 - val_loss: 0.4261 - val_categorical_accuracy: 0.8963
Epoch 101/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4362 - categorical_accuracy: 0.8430 - val_loss: 0.4423 - val_categorical_accuracy: 0.8876
Epoch 102/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4188 - categorical_accuracy: 0.8455 - val_loss: 0.4060 - val_categorical_accuracy: 0.8876
Epoch 103/200
26/26 [==============================] - 1s 23ms/step - loss: 0.4576 - categorical_accuracy: 0.8245 - val_loss: 0.3859 - val_categorical_accuracy: 0.8818
Epoch 104/200
26/26 [==============================] - 1s 24ms/step - loss: 0.3876 - categorical_accuracy: 0.8418 - val_loss: 0.4150 - val_categorical_accuracy: 0.8818
Epoch 105/200
26/26 [==============================] - 1s 29ms/step - loss: 0.4438 - categorical_accuracy: 0.8393 - val_loss: 0.3639 - val_categorical_accuracy: 0.8934
Epoch 106/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4127 - categorical_accuracy: 0.8381 - val_loss: 0.3568 - val_categorical_accuracy: 0.8934
Epoch 107/200
26/26 [==============================] - 1s 22ms/step - loss: 0.4549 - categorical_accuracy: 0.8368 - val_loss: 0.3887 - val_categorical_accuracy: 0.8905
Epoch 108/200
26/26 [==============================] - 1s 25ms/step - loss: 0.4538 - categorical_accuracy: 0.8331 - val_loss: 0.4051 - val_categorical_accuracy: 0.8847
Epoch 109/200
26/26 [==============================] - 1s 30ms/step - loss: 0.4230 - categorical_accuracy: 0.8418 - val_loss: 0.4139 - val_categorical_accuracy: 0.8934
Epoch 110/200
26/26 [==============================] - 1s 28ms/step - loss: 0.4304 - categorical_accuracy: 0.8257 - val_loss: 0.4125 - val_categorical_accuracy: 0.8934
Epoch 111/200
26/26 [==============================] - 1s 27ms/step - loss: 0.3986 - categorical_accuracy: 0.8368 - val_loss: 0.4183 - val_categorical_accuracy: 0.8934
Epoch 112/200
26/26 [==============================] - 1s 19ms/step - loss: 0.4089 - categorical_accuracy: 0.8393 - val_loss: 0.4308 - val_categorical_accuracy: 0.8991
Epoch 113/200
26/26 [==============================] - 1s 29ms/step - loss: 0.3860 - categorical_accuracy: 0.8714 - val_loss: 0.4513 - val_categorical_accuracy: 0.8876
Epoch 114/200
26/26 [==============================] - 1s 32ms/step - loss: 0.3842 - categorical_accuracy: 0.8480 - val_loss: 0.4561 - val_categorical_accuracy: 0.8876
Epoch 115/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4108 - categorical_accuracy: 0.8381 - val_loss: 0.4547 - val_categorical_accuracy: 0.8905
Epoch 116/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3910 - categorical_accuracy: 0.8578 - val_loss: 0.4253 - val_categorical_accuracy: 0.8847
Epoch 117/200
26/26 [==============================] - 1s 32ms/step - loss: 0.4279 - categorical_accuracy: 0.8344 - val_loss: 0.4318 - val_categorical_accuracy: 0.9049
Epoch 118/200
26/26 [==============================] - 1s 30ms/step - loss: 0.4310 - categorical_accuracy: 0.8504 - val_loss: 0.4003 - val_categorical_accuracy: 0.8991
Epoch 119/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3939 - categorical_accuracy: 0.8529 - val_loss: 0.3826 - val_categorical_accuracy: 0.8991
Epoch 120/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3852 - categorical_accuracy: 0.8640 - val_loss: 0.4130 - val_categorical_accuracy: 0.8876
Epoch 121/200
26/26 [==============================] - 1s 30ms/step - loss: 0.3712 - categorical_accuracy: 0.8616 - val_loss: 0.4605 - val_categorical_accuracy: 0.8934
Epoch 122/200
26/26 [==============================] - 1s 35ms/step - loss: 0.3920 - categorical_accuracy: 0.8430 - val_loss: 0.4400 - val_categorical_accuracy: 0.8963
Epoch 123/200
26/26 [==============================] - 1s 26ms/step - loss: 0.4025 - categorical_accuracy: 0.8430 - val_loss: 0.4150 - val_categorical_accuracy: 0.9049
Epoch 124/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3489 - categorical_accuracy: 0.8739 - val_loss: 0.4391 - val_categorical_accuracy: 0.8991
Epoch 125/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3929 - categorical_accuracy: 0.8467 - val_loss: 0.4186 - val_categorical_accuracy: 0.8905
Epoch 126/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4083 - categorical_accuracy: 0.8331 - val_loss: 0.4384 - val_categorical_accuracy: 0.8847
Epoch 127/200
26/26 [==============================] - 1s 25ms/step - loss: 0.3999 - categorical_accuracy: 0.8603 - val_loss: 0.4450 - val_categorical_accuracy: 0.8847
Epoch 128/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3607 - categorical_accuracy: 0.8739 - val_loss: 0.4346 - val_categorical_accuracy: 0.8905
Epoch 129/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3705 - categorical_accuracy: 0.8529 - val_loss: 0.4389 - val_categorical_accuracy: 0.8963
Epoch 130/200
26/26 [==============================] - 1s 30ms/step - loss: 0.3883 - categorical_accuracy: 0.8541 - val_loss: 0.4654 - val_categorical_accuracy: 0.8934
Epoch 131/200
26/26 [==============================] - 1s 33ms/step - loss: 0.3766 - categorical_accuracy: 0.8628 - val_loss: 0.4119 - val_categorical_accuracy: 0.8991
Epoch 132/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4098 - categorical_accuracy: 0.8430 - val_loss: 0.4334 - val_categorical_accuracy: 0.8991
Epoch 133/200
26/26 [==============================] - 0s 17ms/step - loss: 0.3893 - categorical_accuracy: 0.8517 - val_loss: 0.4296 - val_categorical_accuracy: 0.8934
Epoch 134/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3678 - categorical_accuracy: 0.8554 - val_loss: 0.4414 - val_categorical_accuracy: 0.8905
Epoch 135/200
26/26 [==============================] - 1s 30ms/step - loss: 0.3977 - categorical_accuracy: 0.8492 - val_loss: 0.4358 - val_categorical_accuracy: 0.8905
Epoch 136/200
26/26 [==============================] - 1s 29ms/step - loss: 0.3847 - categorical_accuracy: 0.8578 - val_loss: 0.4274 - val_categorical_accuracy: 0.8876
Epoch 137/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3589 - categorical_accuracy: 0.8591 - val_loss: 0.4167 - val_categorical_accuracy: 0.8963
Epoch 138/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3723 - categorical_accuracy: 0.8677 - val_loss: 0.4068 - val_categorical_accuracy: 0.8876
Epoch 139/200
26/26 [==============================] - 1s 31ms/step - loss: 0.3443 - categorical_accuracy: 0.8739 - val_loss: 0.4424 - val_categorical_accuracy: 0.8818
Epoch 140/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4160 - categorical_accuracy: 0.8492 - val_loss: 0.4068 - val_categorical_accuracy: 0.8991
Epoch 141/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3995 - categorical_accuracy: 0.8566 - val_loss: 0.5557 - val_categorical_accuracy: 0.8963
Epoch 142/200
26/26 [==============================] - 0s 17ms/step - loss: 0.4029 - categorical_accuracy: 0.8541 - val_loss: 0.4871 - val_categorical_accuracy: 0.8905
Epoch 143/200
26/26 [==============================] - 1s 32ms/step - loss: 0.3610 - categorical_accuracy: 0.8727 - val_loss: 0.4782 - val_categorical_accuracy: 0.8934
Epoch 144/200
26/26 [==============================] - 1s 31ms/step - loss: 0.3579 - categorical_accuracy: 0.8789 - val_loss: 0.5024 - val_categorical_accuracy: 0.9020
Epoch 145/200
26/26 [==============================] - 1s 22ms/step - loss: 0.4106 - categorical_accuracy: 0.8405 - val_loss: 0.4718 - val_categorical_accuracy: 0.8905
Epoch 146/200
26/26 [==============================] - 1s 20ms/step - loss: 0.3696 - categorical_accuracy: 0.8578 - val_loss: 0.4913 - val_categorical_accuracy: 0.9020
Epoch 147/200
26/26 [==============================] - 1s 36ms/step - loss: 0.3500 - categorical_accuracy: 0.8752 - val_loss: 0.4630 - val_categorical_accuracy: 0.8732
Epoch 148/200
26/26 [==============================] - 1s 32ms/step - loss: 0.3690 - categorical_accuracy: 0.8517 - val_loss: 0.4823 - val_categorical_accuracy: 0.8876
Epoch 149/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3641 - categorical_accuracy: 0.8640 - val_loss: 0.4607 - val_categorical_accuracy: 0.8991
Epoch 150/200
26/26 [==============================] - 1s 20ms/step - loss: 0.3969 - categorical_accuracy: 0.8628 - val_loss: 0.4564 - val_categorical_accuracy: 0.8905
Epoch 151/200
26/26 [==============================] - 1s 22ms/step - loss: 0.4150 - categorical_accuracy: 0.8443 - val_loss: 0.4684 - val_categorical_accuracy: 0.8847
Epoch 152/200
26/26 [==============================] - 1s 31ms/step - loss: 0.4087 - categorical_accuracy: 0.8517 - val_loss: 0.4811 - val_categorical_accuracy: 0.8963
Epoch 153/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3493 - categorical_accuracy: 0.8616 - val_loss: 0.4953 - val_categorical_accuracy: 0.8847
Epoch 154/200
26/26 [==============================] - 1s 20ms/step - loss: 0.3988 - categorical_accuracy: 0.8467 - val_loss: 0.4212 - val_categorical_accuracy: 0.8963
Epoch 155/200
26/26 [==============================] - 1s 25ms/step - loss: 0.3348 - categorical_accuracy: 0.8776 - val_loss: 0.4741 - val_categorical_accuracy: 0.8905
Epoch 156/200
26/26 [==============================] - 1s 25ms/step - loss: 0.3489 - categorical_accuracy: 0.8628 - val_loss: 0.4781 - val_categorical_accuracy: 0.9078
Epoch 157/200
26/26 [==============================] - 1s 31ms/step - loss: 0.3820 - categorical_accuracy: 0.8455 - val_loss: 0.4519 - val_categorical_accuracy: 0.8934
Epoch 158/200
26/26 [==============================] - 1s 24ms/step - loss: 0.3291 - categorical_accuracy: 0.8764 - val_loss: 0.4497 - val_categorical_accuracy: 0.9020
Epoch 159/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3893 - categorical_accuracy: 0.8492 - val_loss: 0.4392 - val_categorical_accuracy: 0.8847
Epoch 160/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3528 - categorical_accuracy: 0.8628 - val_loss: 0.4513 - val_categorical_accuracy: 0.8905
Epoch 161/200
26/26 [==============================] - 1s 27ms/step - loss: 0.4173 - categorical_accuracy: 0.8356 - val_loss: 0.3991 - val_categorical_accuracy: 0.9078
Epoch 162/200
26/26 [==============================] - 1s 20ms/step - loss: 0.3597 - categorical_accuracy: 0.8752 - val_loss: 0.4061 - val_categorical_accuracy: 0.8934
Epoch 163/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3896 - categorical_accuracy: 0.8677 - val_loss: 0.3999 - val_categorical_accuracy: 0.8934
Epoch 164/200
26/26 [==============================] - 0s 18ms/step - loss: 0.3495 - categorical_accuracy: 0.8764 - val_loss: 0.4405 - val_categorical_accuracy: 0.8818
Epoch 165/200
26/26 [==============================] - 1s 27ms/step - loss: 0.3853 - categorical_accuracy: 0.8603 - val_loss: 0.4105 - val_categorical_accuracy: 0.9020
Epoch 166/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3320 - categorical_accuracy: 0.8789 - val_loss: 0.4206 - val_categorical_accuracy: 0.9020
Epoch 167/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3418 - categorical_accuracy: 0.8690 - val_loss: 0.4249 - val_categorical_accuracy: 0.8963
Epoch 168/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3140 - categorical_accuracy: 0.8801 - val_loss: 0.4250 - val_categorical_accuracy: 0.8991
Epoch 169/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3600 - categorical_accuracy: 0.8566 - val_loss: 0.4256 - val_categorical_accuracy: 0.8876
Epoch 170/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3642 - categorical_accuracy: 0.8764 - val_loss: 0.5353 - val_categorical_accuracy: 0.8934
Epoch 171/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3705 - categorical_accuracy: 0.8665 - val_loss: 0.4561 - val_categorical_accuracy: 0.8934
Epoch 172/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3487 - categorical_accuracy: 0.8578 - val_loss: 0.4706 - val_categorical_accuracy: 0.8818
Epoch 173/200
26/26 [==============================] - 1s 20ms/step - loss: 0.3609 - categorical_accuracy: 0.8640 - val_loss: 0.4789 - val_categorical_accuracy: 0.8818
Epoch 174/200
26/26 [==============================] - 1s 25ms/step - loss: 0.3367 - categorical_accuracy: 0.8739 - val_loss: 0.4660 - val_categorical_accuracy: 0.8847
Epoch 175/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3430 - categorical_accuracy: 0.8813 - val_loss: 0.4473 - val_categorical_accuracy: 0.8963
Epoch 176/200
26/26 [==============================] - 1s 29ms/step - loss: 0.3784 - categorical_accuracy: 0.8529 - val_loss: 0.4612 - val_categorical_accuracy: 0.8991
Epoch 177/200
26/26 [==============================] - 0s 17ms/step - loss: 0.3833 - categorical_accuracy: 0.8504 - val_loss: 0.4861 - val_categorical_accuracy: 0.8905
Epoch 178/200
26/26 [==============================] - 0s 17ms/step - loss: 0.3445 - categorical_accuracy: 0.8789 - val_loss: 0.4788 - val_categorical_accuracy: 0.8963
Epoch 179/200
26/26 [==============================] - 1s 24ms/step - loss: 0.3250 - categorical_accuracy: 0.8776 - val_loss: 0.4491 - val_categorical_accuracy: 0.8991
Epoch 180/200
26/26 [==============================] - 1s 25ms/step - loss: 0.3432 - categorical_accuracy: 0.8764 - val_loss: 0.4003 - val_categorical_accuracy: 0.9020
Epoch 181/200
26/26 [==============================] - 1s 30ms/step - loss: 0.3143 - categorical_accuracy: 0.8801 - val_loss: 0.4481 - val_categorical_accuracy: 0.8847
Epoch 182/200
26/26 [==============================] - 1s 19ms/step - loss: 0.3189 - categorical_accuracy: 0.8838 - val_loss: 0.4827 - val_categorical_accuracy: 0.8991
Epoch 183/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3370 - categorical_accuracy: 0.8677 - val_loss: 0.4496 - val_categorical_accuracy: 0.8963
Epoch 184/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3321 - categorical_accuracy: 0.8789 - val_loss: 0.4323 - val_categorical_accuracy: 0.8963
Epoch 185/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3390 - categorical_accuracy: 0.8826 - val_loss: 0.4555 - val_categorical_accuracy: 0.8963
Epoch 186/200
26/26 [==============================] - 1s 27ms/step - loss: 0.3377 - categorical_accuracy: 0.8764 - val_loss: 0.4649 - val_categorical_accuracy: 0.9078
Epoch 187/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3404 - categorical_accuracy: 0.8727 - val_loss: 0.4119 - val_categorical_accuracy: 0.8790
Epoch 188/200
26/26 [==============================] - 1s 23ms/step - loss: 0.2961 - categorical_accuracy: 0.9048 - val_loss: 0.3862 - val_categorical_accuracy: 0.9078
Epoch 189/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3366 - categorical_accuracy: 0.8838 - val_loss: 0.4846 - val_categorical_accuracy: 0.8818
Epoch 190/200
26/26 [==============================] - 1s 27ms/step - loss: 0.3474 - categorical_accuracy: 0.8578 - val_loss: 0.4242 - val_categorical_accuracy: 0.8905
Epoch 191/200
26/26 [==============================] - 1s 26ms/step - loss: 0.3515 - categorical_accuracy: 0.8752 - val_loss: 0.4053 - val_categorical_accuracy: 0.9107
Epoch 192/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3586 - categorical_accuracy: 0.8628 - val_loss: 0.4119 - val_categorical_accuracy: 0.8934
Epoch 193/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3299 - categorical_accuracy: 0.8714 - val_loss: 0.4464 - val_categorical_accuracy: 0.8905
Epoch 194/200
26/26 [==============================] - 1s 24ms/step - loss: 0.3615 - categorical_accuracy: 0.8554 - val_loss: 0.4469 - val_categorical_accuracy: 0.9020
Epoch 195/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3489 - categorical_accuracy: 0.8727 - val_loss: 0.4722 - val_categorical_accuracy: 0.9020
Epoch 196/200
26/26 [==============================] - 1s 21ms/step - loss: 0.3209 - categorical_accuracy: 0.8752 - val_loss: 0.4409 - val_categorical_accuracy: 0.9049
Epoch 197/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3121 - categorical_accuracy: 0.8850 - val_loss: 0.4501 - val_categorical_accuracy: 0.8847
Epoch 198/200
26/26 [==============================] - 1s 28ms/step - loss: 0.3677 - categorical_accuracy: 0.8665 - val_loss: 0.4198 - val_categorical_accuracy: 0.9020
Epoch 199/200
26/26 [==============================] - 1s 23ms/step - loss: 0.3281 - categorical_accuracy: 0.8776 - val_loss: 0.4320 - val_categorical_accuracy: 0.8991
Epoch 200/200
26/26 [==============================] - 1s 22ms/step - loss: 0.3392 - categorical_accuracy: 0.8752 - val_loss: 0.4182 - val_categorical_accuracy: 0.9020
In [17]:
model.evaluate(X_test_lstm,y_test_re)
11/11 [==============================] - 0s 13ms/step - loss: 0.4182 - categorical_accuracy: 0.9020
Out[17]:
[0.4181867241859436, 0.9020172953605652]
In [18]:
#Plotting Accuracy and loss Curve
%matplotlib inline
import matplotlib.pyplot as plt
acc= history_model_lstm.history['categorical_accuracy']
val_acc = history_model_lstm.history['val_categorical_accuracy']
loss = history_model_lstm.history['loss']
val_loss = history_model_lstm.history['val_loss']


epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Testing accuracy')
plt.title('Training and Testing accuracy - LSTM')
plt.legend()
plt.figure()

plt.plot(epochs, loss,'r', label='Training Loss')
plt.plot(epochs,val_loss,'b', label='Testing Loss')
plt.title('Training and Testing loss - LSTM')
plt.legend()

plt.show()
In [19]:
# Confusion report
from sklearn.metrics import confusion_matrix,classification_report
y_predicted = model.predict(X_test_lstm)
y_pred=[]
for i in y_predicted:
    y_pred.append(np.argmax(i))
y_pred1=pd.Series(y_pred)
y_pred1
print(classification_report(y_test,y_pred1))
11/11 [==============================] - 1s 9ms/step
              precision    recall  f1-score   support

           0       1.00      0.95      0.98        88
           1       0.92      0.91      0.91        65
           2       0.81      0.75      0.78        52
           3       0.92      0.92      0.92        59
           4       0.84      0.93      0.88        83

    accuracy                           0.90       347
   macro avg       0.90      0.89      0.89       347
weighted avg       0.90      0.90      0.90       347

In [20]:
# Confusion metric
from sklearn import metrics
import seaborn as sns
cm=metrics.confusion_matrix(y_test,y_pred1)
# crete seaborn heatmap with required labels
sns.heatmap(cm,annot =True,cmap='Reds', fmt='g',xticklabels=yoga_pose_dict.values(), yticklabels=yoga_pose_dict.values())
Out[20]:
<AxesSubplot:>

1D CNN Model Implementation¶

In [21]:
# Reshaping the input array before applying to 1D CNN
X_train_re=np.array(X_train)
X_test_re=np.array(X_test)
sample_size=X_train_re.shape[0]
time_steps=X_test_re.shape[1]
input_dim=1
X_train_re=X_train_re.reshape(sample_size,time_steps,input_dim)
X_train_re.shape
sample_size1=X_test_re.shape[0]
time_steps1=X_test_re.shape[1]
input_dim1=1
X_test_re=X_test_re.reshape(sample_size1,time_steps1,input_dim1)
X_test_re.shape
Out[21]:
(347, 99, 1)
In [22]:
# 1D CNN model Implementation

model_cnn = Sequential()
model_cnn.add(Conv1D(128,kernel_size=3,input_shape=(X_train_re.shape[1],1)))
model_cnn.add(Dropout(0.5))
model_cnn.add(MaxPooling1D(pool_size=1,name='MaxPooling1D'))
model_cnn.add(Flatten())
model_cnn.add(Dropout(0.5))
model_cnn.add(Dense(64, activation='relu'))
model_cnn.add(Dense(8, activation='relu'))
model_cnn.add(Dense(5, activation='softmax'))
# Compiling the model
model_cnn.compile(optimizer='Adam',loss='categorical_crossentropy', metrics=['categorical_accuracy'])
# Fitting the model on train data
history_model_cnn=model_cnn.fit(X_train_re, y_train_re, epochs=200,validation_data=(X_test_re,y_test_re))
Epoch 1/200
26/26 [==============================] - 4s 75ms/step - loss: 1.3109 - categorical_accuracy: 0.4190 - val_loss: 1.1617 - val_categorical_accuracy: 0.4841
Epoch 2/200
26/26 [==============================] - 1s 50ms/step - loss: 1.1495 - categorical_accuracy: 0.5068 - val_loss: 1.0590 - val_categorical_accuracy: 0.5562
Epoch 3/200
26/26 [==============================] - 1s 53ms/step - loss: 1.0605 - categorical_accuracy: 0.5847 - val_loss: 0.9913 - val_categorical_accuracy: 0.5533
Epoch 4/200
26/26 [==============================] - 1s 53ms/step - loss: 0.9884 - categorical_accuracy: 0.6020 - val_loss: 0.9095 - val_categorical_accuracy: 0.6225
Epoch 5/200
26/26 [==============================] - 1s 50ms/step - loss: 0.9225 - categorical_accuracy: 0.6700 - val_loss: 0.8558 - val_categorical_accuracy: 0.6945
Epoch 6/200
26/26 [==============================] - 2s 59ms/step - loss: 0.8513 - categorical_accuracy: 0.7108 - val_loss: 0.7730 - val_categorical_accuracy: 0.7118
Epoch 7/200
26/26 [==============================] - 1s 46ms/step - loss: 0.7808 - categorical_accuracy: 0.7429 - val_loss: 0.7305 - val_categorical_accuracy: 0.7205
Epoch 8/200
26/26 [==============================] - 2s 59ms/step - loss: 0.7509 - categorical_accuracy: 0.7318 - val_loss: 0.6894 - val_categorical_accuracy: 0.7464
Epoch 9/200
26/26 [==============================] - 1s 44ms/step - loss: 0.7028 - categorical_accuracy: 0.7577 - val_loss: 0.6549 - val_categorical_accuracy: 0.6916
Epoch 10/200
26/26 [==============================] - 2s 59ms/step - loss: 0.6549 - categorical_accuracy: 0.7070 - val_loss: 0.5565 - val_categorical_accuracy: 0.7695
Epoch 11/200
26/26 [==============================] - 1s 46ms/step - loss: 0.5598 - categorical_accuracy: 0.8035 - val_loss: 0.5123 - val_categorical_accuracy: 0.8674
Epoch 12/200
26/26 [==============================] - 1s 53ms/step - loss: 0.5170 - categorical_accuracy: 0.8443 - val_loss: 0.4842 - val_categorical_accuracy: 0.8847
Epoch 13/200
26/26 [==============================] - 1s 54ms/step - loss: 0.5130 - categorical_accuracy: 0.8504 - val_loss: 0.4489 - val_categorical_accuracy: 0.8847
Epoch 14/200
26/26 [==============================] - 1s 53ms/step - loss: 0.4505 - categorical_accuracy: 0.8714 - val_loss: 0.4299 - val_categorical_accuracy: 0.8732
Epoch 15/200
26/26 [==============================] - 1s 50ms/step - loss: 0.4308 - categorical_accuracy: 0.8591 - val_loss: 0.4330 - val_categorical_accuracy: 0.8761
Epoch 16/200
26/26 [==============================] - 1s 50ms/step - loss: 0.4173 - categorical_accuracy: 0.8702 - val_loss: 0.3985 - val_categorical_accuracy: 0.8790
Epoch 17/200
26/26 [==============================] - 2s 57ms/step - loss: 0.4013 - categorical_accuracy: 0.8690 - val_loss: 0.4030 - val_categorical_accuracy: 0.8732
Epoch 18/200
26/26 [==============================] - 1s 47ms/step - loss: 0.3982 - categorical_accuracy: 0.8801 - val_loss: 0.4100 - val_categorical_accuracy: 0.8905
Epoch 19/200
26/26 [==============================] - 2s 60ms/step - loss: 0.4073 - categorical_accuracy: 0.8566 - val_loss: 0.4331 - val_categorical_accuracy: 0.8674
Epoch 20/200
26/26 [==============================] - 1s 45ms/step - loss: 0.3973 - categorical_accuracy: 0.8677 - val_loss: 0.4066 - val_categorical_accuracy: 0.8876
Epoch 21/200
26/26 [==============================] - 2s 60ms/step - loss: 0.3690 - categorical_accuracy: 0.8826 - val_loss: 0.4155 - val_categorical_accuracy: 0.8761
Epoch 22/200
26/26 [==============================] - 1s 56ms/step - loss: 0.3680 - categorical_accuracy: 0.8752 - val_loss: 0.3999 - val_categorical_accuracy: 0.8818
Epoch 23/200
26/26 [==============================] - 1s 55ms/step - loss: 0.3593 - categorical_accuracy: 0.8714 - val_loss: 0.3769 - val_categorical_accuracy: 0.8905
Epoch 24/200
26/26 [==============================] - 1s 51ms/step - loss: 0.3366 - categorical_accuracy: 0.8912 - val_loss: 0.3975 - val_categorical_accuracy: 0.8818
Epoch 25/200
26/26 [==============================] - 1s 45ms/step - loss: 0.3313 - categorical_accuracy: 0.8813 - val_loss: 0.4188 - val_categorical_accuracy: 0.8732
Epoch 26/200
26/26 [==============================] - 1s 49ms/step - loss: 0.3382 - categorical_accuracy: 0.8813 - val_loss: 0.3605 - val_categorical_accuracy: 0.9020
Epoch 27/200
26/26 [==============================] - 1s 46ms/step - loss: 0.3105 - categorical_accuracy: 0.8962 - val_loss: 0.4004 - val_categorical_accuracy: 0.8847
Epoch 28/200
26/26 [==============================] - 1s 51ms/step - loss: 0.3242 - categorical_accuracy: 0.8776 - val_loss: 0.3466 - val_categorical_accuracy: 0.8991
Epoch 29/200
26/26 [==============================] - 1s 45ms/step - loss: 0.3013 - categorical_accuracy: 0.8986 - val_loss: 0.3468 - val_categorical_accuracy: 0.8934
Epoch 30/200
26/26 [==============================] - 1s 55ms/step - loss: 0.3080 - categorical_accuracy: 0.8962 - val_loss: 0.3644 - val_categorical_accuracy: 0.8991
Epoch 31/200
26/26 [==============================] - 1s 47ms/step - loss: 0.2934 - categorical_accuracy: 0.9073 - val_loss: 0.3653 - val_categorical_accuracy: 0.8934
Epoch 32/200
26/26 [==============================] - 1s 44ms/step - loss: 0.2900 - categorical_accuracy: 0.9023 - val_loss: 0.3464 - val_categorical_accuracy: 0.9107
Epoch 33/200
26/26 [==============================] - 1s 44ms/step - loss: 0.2644 - categorical_accuracy: 0.9172 - val_loss: 0.3316 - val_categorical_accuracy: 0.9078
Epoch 34/200
26/26 [==============================] - 1s 42ms/step - loss: 0.2897 - categorical_accuracy: 0.8912 - val_loss: 0.3526 - val_categorical_accuracy: 0.8905
Epoch 35/200
26/26 [==============================] - 2s 58ms/step - loss: 0.2650 - categorical_accuracy: 0.9135 - val_loss: 0.3463 - val_categorical_accuracy: 0.8991
Epoch 36/200
26/26 [==============================] - 1s 49ms/step - loss: 0.2688 - categorical_accuracy: 0.9147 - val_loss: 0.3588 - val_categorical_accuracy: 0.8790
Epoch 37/200
26/26 [==============================] - 1s 52ms/step - loss: 0.2886 - categorical_accuracy: 0.9023 - val_loss: 0.3308 - val_categorical_accuracy: 0.8905
Epoch 38/200
26/26 [==============================] - 1s 49ms/step - loss: 0.2554 - categorical_accuracy: 0.9135 - val_loss: 0.3239 - val_categorical_accuracy: 0.9020
Epoch 39/200
26/26 [==============================] - 1s 46ms/step - loss: 0.2712 - categorical_accuracy: 0.9048 - val_loss: 0.3467 - val_categorical_accuracy: 0.8905
Epoch 40/200
26/26 [==============================] - 2s 56ms/step - loss: 0.2555 - categorical_accuracy: 0.9122 - val_loss: 0.3387 - val_categorical_accuracy: 0.8934
Epoch 41/200
26/26 [==============================] - 1s 43ms/step - loss: 0.2463 - categorical_accuracy: 0.9184 - val_loss: 0.3136 - val_categorical_accuracy: 0.8991
Epoch 42/200
26/26 [==============================] - 2s 60ms/step - loss: 0.2320 - categorical_accuracy: 0.9184 - val_loss: 0.3314 - val_categorical_accuracy: 0.9049
Epoch 43/200
26/26 [==============================] - 1s 48ms/step - loss: 0.2491 - categorical_accuracy: 0.9184 - val_loss: 0.3188 - val_categorical_accuracy: 0.9049
Epoch 44/200
26/26 [==============================] - 1s 50ms/step - loss: 0.2563 - categorical_accuracy: 0.9073 - val_loss: 0.3369 - val_categorical_accuracy: 0.9078
Epoch 45/200
26/26 [==============================] - 1s 51ms/step - loss: 0.2658 - categorical_accuracy: 0.9159 - val_loss: 0.3155 - val_categorical_accuracy: 0.9078
Epoch 46/200
26/26 [==============================] - 1s 52ms/step - loss: 0.2474 - categorical_accuracy: 0.9197 - val_loss: 0.3122 - val_categorical_accuracy: 0.9049
Epoch 47/200
26/26 [==============================] - 1s 52ms/step - loss: 0.2461 - categorical_accuracy: 0.9209 - val_loss: 0.3221 - val_categorical_accuracy: 0.9020
Epoch 48/200
26/26 [==============================] - 1s 47ms/step - loss: 0.2313 - categorical_accuracy: 0.9234 - val_loss: 0.3188 - val_categorical_accuracy: 0.8905
Epoch 49/200
26/26 [==============================] - 1s 54ms/step - loss: 0.2279 - categorical_accuracy: 0.9320 - val_loss: 0.3330 - val_categorical_accuracy: 0.8963
Epoch 50/200
26/26 [==============================] - 1s 50ms/step - loss: 0.2584 - categorical_accuracy: 0.9147 - val_loss: 0.2926 - val_categorical_accuracy: 0.9135
Epoch 51/200
26/26 [==============================] - 1s 54ms/step - loss: 0.2313 - categorical_accuracy: 0.9221 - val_loss: 0.3335 - val_categorical_accuracy: 0.8934
Epoch 52/200
26/26 [==============================] - 1s 48ms/step - loss: 0.2241 - categorical_accuracy: 0.9159 - val_loss: 0.3124 - val_categorical_accuracy: 0.9020
Epoch 53/200
26/26 [==============================] - 1s 55ms/step - loss: 0.2089 - categorical_accuracy: 0.9295 - val_loss: 0.3169 - val_categorical_accuracy: 0.8934
Epoch 54/200
26/26 [==============================] - 1s 54ms/step - loss: 0.2008 - categorical_accuracy: 0.9308 - val_loss: 0.3534 - val_categorical_accuracy: 0.8963
Epoch 55/200
26/26 [==============================] - 1s 53ms/step - loss: 0.2296 - categorical_accuracy: 0.9258 - val_loss: 0.3201 - val_categorical_accuracy: 0.9020
Epoch 56/200
26/26 [==============================] - 1s 50ms/step - loss: 0.2440 - categorical_accuracy: 0.9184 - val_loss: 0.3140 - val_categorical_accuracy: 0.8991
Epoch 57/200
26/26 [==============================] - 1s 47ms/step - loss: 0.2216 - categorical_accuracy: 0.9147 - val_loss: 0.3303 - val_categorical_accuracy: 0.9020
Epoch 58/200
26/26 [==============================] - 2s 59ms/step - loss: 0.2078 - categorical_accuracy: 0.9320 - val_loss: 0.3271 - val_categorical_accuracy: 0.8934
Epoch 59/200
26/26 [==============================] - 1s 45ms/step - loss: 0.2074 - categorical_accuracy: 0.9258 - val_loss: 0.3173 - val_categorical_accuracy: 0.8991
Epoch 60/200
26/26 [==============================] - 2s 62ms/step - loss: 0.1977 - categorical_accuracy: 0.9320 - val_loss: 0.4013 - val_categorical_accuracy: 0.8905
Epoch 61/200
26/26 [==============================] - 1s 43ms/step - loss: 0.2093 - categorical_accuracy: 0.9320 - val_loss: 0.3138 - val_categorical_accuracy: 0.8934
Epoch 62/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1887 - categorical_accuracy: 0.9382 - val_loss: 0.3408 - val_categorical_accuracy: 0.8991
Epoch 63/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1870 - categorical_accuracy: 0.9308 - val_loss: 0.3007 - val_categorical_accuracy: 0.9078
Epoch 64/200
26/26 [==============================] - 1s 49ms/step - loss: 0.2069 - categorical_accuracy: 0.9357 - val_loss: 0.3159 - val_categorical_accuracy: 0.9020
Epoch 65/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1957 - categorical_accuracy: 0.9320 - val_loss: 0.2855 - val_categorical_accuracy: 0.9107
Epoch 66/200
26/26 [==============================] - 1s 45ms/step - loss: 0.2049 - categorical_accuracy: 0.9333 - val_loss: 0.3215 - val_categorical_accuracy: 0.9049
Epoch 67/200
26/26 [==============================] - 1s 54ms/step - loss: 0.1693 - categorical_accuracy: 0.9506 - val_loss: 0.3372 - val_categorical_accuracy: 0.8991
Epoch 68/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1823 - categorical_accuracy: 0.9357 - val_loss: 0.3380 - val_categorical_accuracy: 0.8876
Epoch 69/200
26/26 [==============================] - 2s 61ms/step - loss: 0.1856 - categorical_accuracy: 0.9345 - val_loss: 0.3743 - val_categorical_accuracy: 0.8847
Epoch 70/200
26/26 [==============================] - 1s 45ms/step - loss: 0.2104 - categorical_accuracy: 0.9271 - val_loss: 0.2966 - val_categorical_accuracy: 0.8905
Epoch 71/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1813 - categorical_accuracy: 0.9345 - val_loss: 0.3007 - val_categorical_accuracy: 0.9049
Epoch 72/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1863 - categorical_accuracy: 0.9456 - val_loss: 0.3151 - val_categorical_accuracy: 0.8991
Epoch 73/200
26/26 [==============================] - 2s 58ms/step - loss: 0.1938 - categorical_accuracy: 0.9419 - val_loss: 0.3288 - val_categorical_accuracy: 0.8991
Epoch 74/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1701 - categorical_accuracy: 0.9456 - val_loss: 0.2907 - val_categorical_accuracy: 0.9020
Epoch 75/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1770 - categorical_accuracy: 0.9444 - val_loss: 0.3124 - val_categorical_accuracy: 0.8991
Epoch 76/200
26/26 [==============================] - 2s 56ms/step - loss: 0.1798 - categorical_accuracy: 0.9394 - val_loss: 0.3205 - val_categorical_accuracy: 0.8963
Epoch 77/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1786 - categorical_accuracy: 0.9431 - val_loss: 0.3351 - val_categorical_accuracy: 0.8963
Epoch 78/200
26/26 [==============================] - 1s 55ms/step - loss: 0.1666 - categorical_accuracy: 0.9407 - val_loss: 0.3053 - val_categorical_accuracy: 0.9049
Epoch 79/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1746 - categorical_accuracy: 0.9431 - val_loss: 0.3086 - val_categorical_accuracy: 0.9078
Epoch 80/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1643 - categorical_accuracy: 0.9444 - val_loss: 0.3037 - val_categorical_accuracy: 0.9049
Epoch 81/200
26/26 [==============================] - 1s 49ms/step - loss: 0.1654 - categorical_accuracy: 0.9456 - val_loss: 0.3409 - val_categorical_accuracy: 0.8991
Epoch 82/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1771 - categorical_accuracy: 0.9345 - val_loss: 0.3051 - val_categorical_accuracy: 0.8991
Epoch 83/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1585 - categorical_accuracy: 0.9506 - val_loss: 0.2834 - val_categorical_accuracy: 0.9078
Epoch 84/200
26/26 [==============================] - 1s 48ms/step - loss: 0.1627 - categorical_accuracy: 0.9543 - val_loss: 0.2938 - val_categorical_accuracy: 0.9107
Epoch 85/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1395 - categorical_accuracy: 0.9580 - val_loss: 0.2927 - val_categorical_accuracy: 0.9020
Epoch 86/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1361 - categorical_accuracy: 0.9592 - val_loss: 0.2772 - val_categorical_accuracy: 0.9078
Epoch 87/200
26/26 [==============================] - 2s 62ms/step - loss: 0.1721 - categorical_accuracy: 0.9345 - val_loss: 0.3493 - val_categorical_accuracy: 0.8905
Epoch 88/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1507 - categorical_accuracy: 0.9555 - val_loss: 0.2966 - val_categorical_accuracy: 0.9078
Epoch 89/200
26/26 [==============================] - 1s 54ms/step - loss: 0.1480 - categorical_accuracy: 0.9506 - val_loss: 0.2945 - val_categorical_accuracy: 0.9020
Epoch 90/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1589 - categorical_accuracy: 0.9444 - val_loss: 0.3032 - val_categorical_accuracy: 0.9020
Epoch 91/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1492 - categorical_accuracy: 0.9493 - val_loss: 0.3025 - val_categorical_accuracy: 0.9135
Epoch 92/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1475 - categorical_accuracy: 0.9506 - val_loss: 0.2972 - val_categorical_accuracy: 0.9107
Epoch 93/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1530 - categorical_accuracy: 0.9481 - val_loss: 0.3161 - val_categorical_accuracy: 0.8963
Epoch 94/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1588 - categorical_accuracy: 0.9468 - val_loss: 0.3458 - val_categorical_accuracy: 0.8991
Epoch 95/200
26/26 [==============================] - 1s 45ms/step - loss: 0.1548 - categorical_accuracy: 0.9481 - val_loss: 0.3196 - val_categorical_accuracy: 0.9020
Epoch 96/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1508 - categorical_accuracy: 0.9530 - val_loss: 0.3195 - val_categorical_accuracy: 0.8991
Epoch 97/200
26/26 [==============================] - 1s 48ms/step - loss: 0.1382 - categorical_accuracy: 0.9543 - val_loss: 0.2955 - val_categorical_accuracy: 0.9020
Epoch 98/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1563 - categorical_accuracy: 0.9493 - val_loss: 0.2971 - val_categorical_accuracy: 0.8963
Epoch 99/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1564 - categorical_accuracy: 0.9493 - val_loss: 0.2961 - val_categorical_accuracy: 0.9107
Epoch 100/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1290 - categorical_accuracy: 0.9555 - val_loss: 0.2988 - val_categorical_accuracy: 0.9049
Epoch 101/200
26/26 [==============================] - 2s 56ms/step - loss: 0.1517 - categorical_accuracy: 0.9506 - val_loss: 0.2842 - val_categorical_accuracy: 0.9020
Epoch 102/200
26/26 [==============================] - 1s 49ms/step - loss: 0.1364 - categorical_accuracy: 0.9530 - val_loss: 0.3293 - val_categorical_accuracy: 0.8991
Epoch 103/200
26/26 [==============================] - 2s 59ms/step - loss: 0.1560 - categorical_accuracy: 0.9555 - val_loss: 0.3093 - val_categorical_accuracy: 0.9049
Epoch 104/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1560 - categorical_accuracy: 0.9567 - val_loss: 0.2872 - val_categorical_accuracy: 0.8934
Epoch 105/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1520 - categorical_accuracy: 0.9506 - val_loss: 0.3280 - val_categorical_accuracy: 0.8991
Epoch 106/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1574 - categorical_accuracy: 0.9370 - val_loss: 0.3224 - val_categorical_accuracy: 0.8934
Epoch 107/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1547 - categorical_accuracy: 0.9518 - val_loss: 0.3440 - val_categorical_accuracy: 0.9049
Epoch 108/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1216 - categorical_accuracy: 0.9592 - val_loss: 0.2930 - val_categorical_accuracy: 0.9107
Epoch 109/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1224 - categorical_accuracy: 0.9617 - val_loss: 0.3129 - val_categorical_accuracy: 0.9020
Epoch 110/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1399 - categorical_accuracy: 0.9580 - val_loss: 0.3009 - val_categorical_accuracy: 0.9049
Epoch 111/200
26/26 [==============================] - 1s 45ms/step - loss: 0.1375 - categorical_accuracy: 0.9506 - val_loss: 0.3143 - val_categorical_accuracy: 0.8963
Epoch 112/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1573 - categorical_accuracy: 0.9481 - val_loss: 0.3677 - val_categorical_accuracy: 0.8991
Epoch 113/200
26/26 [==============================] - 1s 45ms/step - loss: 0.1411 - categorical_accuracy: 0.9456 - val_loss: 0.4093 - val_categorical_accuracy: 0.9020
Epoch 114/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1622 - categorical_accuracy: 0.9444 - val_loss: 0.3637 - val_categorical_accuracy: 0.9049
Epoch 115/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1677 - categorical_accuracy: 0.9394 - val_loss: 0.4395 - val_categorical_accuracy: 0.8790
Epoch 116/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1397 - categorical_accuracy: 0.9444 - val_loss: 0.3332 - val_categorical_accuracy: 0.9078
Epoch 117/200
26/26 [==============================] - 2s 60ms/step - loss: 0.1290 - categorical_accuracy: 0.9543 - val_loss: 0.3571 - val_categorical_accuracy: 0.9107
Epoch 118/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1296 - categorical_accuracy: 0.9555 - val_loss: 0.3581 - val_categorical_accuracy: 0.8963
Epoch 119/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1217 - categorical_accuracy: 0.9604 - val_loss: 0.3029 - val_categorical_accuracy: 0.9049
Epoch 120/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1187 - categorical_accuracy: 0.9567 - val_loss: 0.2987 - val_categorical_accuracy: 0.9107
Epoch 121/200
26/26 [==============================] - 2s 62ms/step - loss: 0.1418 - categorical_accuracy: 0.9543 - val_loss: 0.3171 - val_categorical_accuracy: 0.9049
Epoch 122/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1585 - categorical_accuracy: 0.9431 - val_loss: 0.2981 - val_categorical_accuracy: 0.8991
Epoch 123/200
26/26 [==============================] - 2s 65ms/step - loss: 0.1230 - categorical_accuracy: 0.9629 - val_loss: 0.3003 - val_categorical_accuracy: 0.8991
Epoch 124/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1195 - categorical_accuracy: 0.9654 - val_loss: 0.3327 - val_categorical_accuracy: 0.8991
Epoch 125/200
26/26 [==============================] - 2s 63ms/step - loss: 0.1201 - categorical_accuracy: 0.9629 - val_loss: 0.3171 - val_categorical_accuracy: 0.9020
Epoch 126/200
26/26 [==============================] - 1s 48ms/step - loss: 0.1276 - categorical_accuracy: 0.9580 - val_loss: 0.3159 - val_categorical_accuracy: 0.9020
Epoch 127/200
26/26 [==============================] - 2s 64ms/step - loss: 0.1085 - categorical_accuracy: 0.9629 - val_loss: 0.3315 - val_categorical_accuracy: 0.9049
Epoch 128/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1277 - categorical_accuracy: 0.9604 - val_loss: 0.3134 - val_categorical_accuracy: 0.9107
Epoch 129/200
26/26 [==============================] - 1s 55ms/step - loss: 0.1233 - categorical_accuracy: 0.9592 - val_loss: 0.3195 - val_categorical_accuracy: 0.9020
Epoch 130/200
26/26 [==============================] - 1s 43ms/step - loss: 0.1377 - categorical_accuracy: 0.9567 - val_loss: 0.3728 - val_categorical_accuracy: 0.9020
Epoch 131/200
26/26 [==============================] - 1s 55ms/step - loss: 0.1398 - categorical_accuracy: 0.9518 - val_loss: 0.3181 - val_categorical_accuracy: 0.9107
Epoch 132/200
26/26 [==============================] - 2s 56ms/step - loss: 0.1235 - categorical_accuracy: 0.9555 - val_loss: 0.3079 - val_categorical_accuracy: 0.8963
Epoch 133/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1210 - categorical_accuracy: 0.9567 - val_loss: 0.3701 - val_categorical_accuracy: 0.9020
Epoch 134/200
26/26 [==============================] - 2s 61ms/step - loss: 0.1191 - categorical_accuracy: 0.9629 - val_loss: 0.3074 - val_categorical_accuracy: 0.8991
Epoch 135/200
26/26 [==============================] - 1s 48ms/step - loss: 0.1225 - categorical_accuracy: 0.9543 - val_loss: 0.3655 - val_categorical_accuracy: 0.8991
Epoch 136/200
26/26 [==============================] - 2s 59ms/step - loss: 0.1068 - categorical_accuracy: 0.9654 - val_loss: 0.3240 - val_categorical_accuracy: 0.9049
Epoch 137/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1191 - categorical_accuracy: 0.9580 - val_loss: 0.2843 - val_categorical_accuracy: 0.9107
Epoch 138/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1126 - categorical_accuracy: 0.9642 - val_loss: 0.3273 - val_categorical_accuracy: 0.9164
Epoch 139/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1171 - categorical_accuracy: 0.9567 - val_loss: 0.3140 - val_categorical_accuracy: 0.9135
Epoch 140/200
26/26 [==============================] - 1s 54ms/step - loss: 0.1227 - categorical_accuracy: 0.9580 - val_loss: 0.3162 - val_categorical_accuracy: 0.9049
Epoch 141/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1182 - categorical_accuracy: 0.9592 - val_loss: 0.3462 - val_categorical_accuracy: 0.9078
Epoch 142/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1121 - categorical_accuracy: 0.9617 - val_loss: 0.3438 - val_categorical_accuracy: 0.9078
Epoch 143/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1167 - categorical_accuracy: 0.9592 - val_loss: 0.3972 - val_categorical_accuracy: 0.9107
Epoch 144/200
26/26 [==============================] - 1s 45ms/step - loss: 0.1134 - categorical_accuracy: 0.9654 - val_loss: 0.2874 - val_categorical_accuracy: 0.9193
Epoch 145/200
26/26 [==============================] - 2s 58ms/step - loss: 0.1110 - categorical_accuracy: 0.9629 - val_loss: 0.3246 - val_categorical_accuracy: 0.9020
Epoch 146/200
26/26 [==============================] - 1s 43ms/step - loss: 0.1282 - categorical_accuracy: 0.9567 - val_loss: 0.3549 - val_categorical_accuracy: 0.9107
Epoch 147/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1268 - categorical_accuracy: 0.9604 - val_loss: 0.2729 - val_categorical_accuracy: 0.9164
Epoch 148/200
26/26 [==============================] - 1s 49ms/step - loss: 0.1175 - categorical_accuracy: 0.9580 - val_loss: 0.3084 - val_categorical_accuracy: 0.9049
Epoch 149/200
26/26 [==============================] - 1s 53ms/step - loss: 0.0977 - categorical_accuracy: 0.9666 - val_loss: 0.3354 - val_categorical_accuracy: 0.9049
Epoch 150/200
26/26 [==============================] - 1s 53ms/step - loss: 0.0992 - categorical_accuracy: 0.9654 - val_loss: 0.3862 - val_categorical_accuracy: 0.9107
Epoch 151/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1141 - categorical_accuracy: 0.9604 - val_loss: 0.2888 - val_categorical_accuracy: 0.9049
Epoch 152/200
26/26 [==============================] - 2s 59ms/step - loss: 0.1004 - categorical_accuracy: 0.9629 - val_loss: 0.3235 - val_categorical_accuracy: 0.9164
Epoch 153/200
26/26 [==============================] - 1s 48ms/step - loss: 0.0980 - categorical_accuracy: 0.9654 - val_loss: 0.3680 - val_categorical_accuracy: 0.9049
Epoch 154/200
26/26 [==============================] - 2s 61ms/step - loss: 0.1175 - categorical_accuracy: 0.9555 - val_loss: 0.3449 - val_categorical_accuracy: 0.9020
Epoch 155/200
26/26 [==============================] - 1s 46ms/step - loss: 0.1219 - categorical_accuracy: 0.9604 - val_loss: 0.3099 - val_categorical_accuracy: 0.9135
Epoch 156/200
26/26 [==============================] - 1s 48ms/step - loss: 0.0960 - categorical_accuracy: 0.9642 - val_loss: 0.2722 - val_categorical_accuracy: 0.9222
Epoch 157/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1260 - categorical_accuracy: 0.9555 - val_loss: 0.3357 - val_categorical_accuracy: 0.9020
Epoch 158/200
26/26 [==============================] - 2s 62ms/step - loss: 0.1016 - categorical_accuracy: 0.9679 - val_loss: 0.3250 - val_categorical_accuracy: 0.9222
Epoch 159/200
26/26 [==============================] - 1s 55ms/step - loss: 0.1237 - categorical_accuracy: 0.9592 - val_loss: 0.2885 - val_categorical_accuracy: 0.9107
Epoch 160/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1304 - categorical_accuracy: 0.9493 - val_loss: 0.3528 - val_categorical_accuracy: 0.9049
Epoch 161/200
26/26 [==============================] - 1s 52ms/step - loss: 0.0865 - categorical_accuracy: 0.9642 - val_loss: 0.3321 - val_categorical_accuracy: 0.9107
Epoch 162/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1199 - categorical_accuracy: 0.9654 - val_loss: 0.3651 - val_categorical_accuracy: 0.9020
Epoch 163/200
26/26 [==============================] - 2s 56ms/step - loss: 0.1064 - categorical_accuracy: 0.9617 - val_loss: 0.3214 - val_categorical_accuracy: 0.9135
Epoch 164/200
26/26 [==============================] - 1s 45ms/step - loss: 0.1367 - categorical_accuracy: 0.9543 - val_loss: 0.3295 - val_categorical_accuracy: 0.9078
Epoch 165/200
26/26 [==============================] - 2s 59ms/step - loss: 0.1081 - categorical_accuracy: 0.9666 - val_loss: 0.3672 - val_categorical_accuracy: 0.9078
Epoch 166/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1131 - categorical_accuracy: 0.9617 - val_loss: 0.3655 - val_categorical_accuracy: 0.9049
Epoch 167/200
26/26 [==============================] - 2s 58ms/step - loss: 0.1185 - categorical_accuracy: 0.9592 - val_loss: 0.4254 - val_categorical_accuracy: 0.9020
Epoch 168/200
26/26 [==============================] - 1s 49ms/step - loss: 0.1024 - categorical_accuracy: 0.9666 - val_loss: 0.3518 - val_categorical_accuracy: 0.8991
Epoch 169/200
26/26 [==============================] - 1s 51ms/step - loss: 0.1088 - categorical_accuracy: 0.9642 - val_loss: 0.3401 - val_categorical_accuracy: 0.9078
Epoch 170/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1186 - categorical_accuracy: 0.9629 - val_loss: 0.3788 - val_categorical_accuracy: 0.9020
Epoch 171/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1057 - categorical_accuracy: 0.9580 - val_loss: 0.3336 - val_categorical_accuracy: 0.9107
Epoch 172/200
26/26 [==============================] - 2s 61ms/step - loss: 0.0779 - categorical_accuracy: 0.9740 - val_loss: 0.3493 - val_categorical_accuracy: 0.9107
Epoch 173/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1098 - categorical_accuracy: 0.9642 - val_loss: 0.2875 - val_categorical_accuracy: 0.9135
Epoch 174/200
26/26 [==============================] - 2s 63ms/step - loss: 0.1225 - categorical_accuracy: 0.9555 - val_loss: 0.3049 - val_categorical_accuracy: 0.9107
Epoch 175/200
26/26 [==============================] - 1s 44ms/step - loss: 0.1001 - categorical_accuracy: 0.9691 - val_loss: 0.3955 - val_categorical_accuracy: 0.9049
Epoch 176/200
26/26 [==============================] - 2s 64ms/step - loss: 0.0944 - categorical_accuracy: 0.9679 - val_loss: 0.4160 - val_categorical_accuracy: 0.9107
Epoch 177/200
26/26 [==============================] - 1s 50ms/step - loss: 0.0991 - categorical_accuracy: 0.9617 - val_loss: 0.3676 - val_categorical_accuracy: 0.9078
Epoch 178/200
26/26 [==============================] - 2s 62ms/step - loss: 0.0908 - categorical_accuracy: 0.9765 - val_loss: 0.3611 - val_categorical_accuracy: 0.9020
Epoch 179/200
26/26 [==============================] - 1s 53ms/step - loss: 0.1086 - categorical_accuracy: 0.9629 - val_loss: 0.3668 - val_categorical_accuracy: 0.9049
Epoch 180/200
26/26 [==============================] - 1s 52ms/step - loss: 0.0974 - categorical_accuracy: 0.9679 - val_loss: 0.3210 - val_categorical_accuracy: 0.9049
Epoch 181/200
26/26 [==============================] - 1s 56ms/step - loss: 0.1026 - categorical_accuracy: 0.9703 - val_loss: 0.3909 - val_categorical_accuracy: 0.8905
Epoch 182/200
26/26 [==============================] - 1s 47ms/step - loss: 0.1263 - categorical_accuracy: 0.9518 - val_loss: 0.3522 - val_categorical_accuracy: 0.9107
Epoch 183/200
26/26 [==============================] - 1s 52ms/step - loss: 0.1182 - categorical_accuracy: 0.9567 - val_loss: 0.3563 - val_categorical_accuracy: 0.9193
Epoch 184/200
26/26 [==============================] - 1s 44ms/step - loss: 0.1194 - categorical_accuracy: 0.9604 - val_loss: 0.3464 - val_categorical_accuracy: 0.9020
Epoch 185/200
26/26 [==============================] - 1s 55ms/step - loss: 0.1095 - categorical_accuracy: 0.9642 - val_loss: 0.3905 - val_categorical_accuracy: 0.9078
Epoch 186/200
26/26 [==============================] - 1s 50ms/step - loss: 0.1069 - categorical_accuracy: 0.9604 - val_loss: 0.3212 - val_categorical_accuracy: 0.9107
Epoch 187/200
26/26 [==============================] - 1s 50ms/step - loss: 0.0839 - categorical_accuracy: 0.9703 - val_loss: 0.3170 - val_categorical_accuracy: 0.9135
Epoch 188/200
26/26 [==============================] - 2s 58ms/step - loss: 0.0995 - categorical_accuracy: 0.9716 - val_loss: 0.3817 - val_categorical_accuracy: 0.8991
Epoch 189/200
26/26 [==============================] - 1s 47ms/step - loss: 0.0841 - categorical_accuracy: 0.9716 - val_loss: 0.3585 - val_categorical_accuracy: 0.9078
Epoch 190/200
26/26 [==============================] - 2s 61ms/step - loss: 0.0933 - categorical_accuracy: 0.9691 - val_loss: 0.3847 - val_categorical_accuracy: 0.9020
Epoch 191/200
26/26 [==============================] - 1s 48ms/step - loss: 0.0994 - categorical_accuracy: 0.9617 - val_loss: 0.3820 - val_categorical_accuracy: 0.9049
Epoch 192/200
26/26 [==============================] - 1s 55ms/step - loss: 0.0811 - categorical_accuracy: 0.9691 - val_loss: 0.3638 - val_categorical_accuracy: 0.9107
Epoch 193/200
26/26 [==============================] - 1s 43ms/step - loss: 0.1029 - categorical_accuracy: 0.9679 - val_loss: 0.4541 - val_categorical_accuracy: 0.9020
Epoch 194/200
26/26 [==============================] - 1s 40ms/step - loss: 0.0884 - categorical_accuracy: 0.9703 - val_loss: 0.3429 - val_categorical_accuracy: 0.9135
Epoch 195/200
26/26 [==============================] - 1s 43ms/step - loss: 0.0896 - categorical_accuracy: 0.9703 - val_loss: 0.3746 - val_categorical_accuracy: 0.9107
Epoch 196/200
26/26 [==============================] - 1s 46ms/step - loss: 0.0922 - categorical_accuracy: 0.9703 - val_loss: 0.3625 - val_categorical_accuracy: 0.9164
Epoch 197/200
26/26 [==============================] - 2s 64ms/step - loss: 0.0986 - categorical_accuracy: 0.9629 - val_loss: 0.3837 - val_categorical_accuracy: 0.9078
Epoch 198/200
26/26 [==============================] - 1s 49ms/step - loss: 0.0974 - categorical_accuracy: 0.9642 - val_loss: 0.4300 - val_categorical_accuracy: 0.8991
Epoch 199/200
26/26 [==============================] - 2s 64ms/step - loss: 0.0967 - categorical_accuracy: 0.9716 - val_loss: 0.4291 - val_categorical_accuracy: 0.9107
Epoch 200/200
26/26 [==============================] - 1s 45ms/step - loss: 0.0858 - categorical_accuracy: 0.9679 - val_loss: 0.3825 - val_categorical_accuracy: 0.9135
In [23]:
#Evaluate the 1DCNN model on test data
model_cnn.evaluate(X_test_re,y_test_re)
11/11 [==============================] - 0s 20ms/step - loss: 0.3825 - categorical_accuracy: 0.9135
Out[23]:
[0.3825025260448456, 0.9135446548461914]
In [24]:
# Visualize Loss and Accuracy Plot of the 1D CNN
%matplotlib inline
import matplotlib.pyplot as plt
acc= history_model_cnn.history['categorical_accuracy']
val_acc = history_model_cnn.history['val_categorical_accuracy']
loss = history_model_cnn.history['loss']
val_loss = history_model_cnn.history['val_loss']


epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Testing accuracy')
plt.title('Training and Testing accuracy - 1D CNN')
plt.legend()
plt.figure()

plt.plot(epochs, loss,'r', label='Training Loss')
plt.plot(epochs,val_loss,'b', label='Testing Loss')
plt.title('Training and Testing loss - 1D CNN')
plt.legend()

plt.show()

Applying Machine Learning Models on GeneratedLand_Mark dataset¶

In [25]:
# Fitting Generated landmark datset on Random Forest classifier
import numpy as np
seed = np.random.seed(22)
rng=np.random.RandomState(3)
from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold
cv=RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)

#RandomizedSearchCV for hyperparameter tuning
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
params= {'n_estimators':[10,20,30,40,50,60,70,80,90,100], 'max_features': ['log2','sqrt'],'max_depth':[2,4,6,8,10],'min_samples_split':[2,5],'min_samples_leaf':[1,2],'bootstrap':[True,False]}
random_forest=RandomizedSearchCV(RandomForestClassifier(random_state=rng),param_distributions=params,n_iter=5,scoring='accuracy',n_jobs=-1,cv=cv,verbose=3,random_state=rng)
random_forest.fit(X_train,y_train)
Fitting 15 folds for each of 5 candidates, totalling 75 fits
Out[25]:
RandomizedSearchCV(cv=RepeatedStratifiedKFold(n_repeats=3, n_splits=5, random_state=1),
                   estimator=RandomForestClassifier(random_state=RandomState(MT19937) at 0x1D22603FA40),
                   n_iter=5, n_jobs=-1,
                   param_distributions={'bootstrap': [True, False],
                                        'max_depth': [2, 4, 6, 8, 10],
                                        'max_features': ['log2', 'sqrt'],
                                        'min_samples_leaf': [1, 2],
                                        'min_samples_split': [2, 5],
                                        'n_estimators': [10, 20, 30, 40, 50, 60,
                                                         70, 80, 90, 100]},
                   random_state=RandomState(MT19937) at 0x1D22603FA40,
                   scoring='accuracy', verbose=3)
In [26]:
# Random Forest model Evaluation
# best parameters
print(random_forest.best_params_)
{'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'max_features': 'sqrt', 'max_depth': 10, 'bootstrap': False}
In [27]:
print("Accuracy is:", random_forest.score(X_test,y_test))
Accuracy is: 0.9077809798270894
In [28]:
from sklearn import metrics
import seaborn as sns
y_pred_random=random_forest.predict(X_test)
random_forest_cm = metrics.confusion_matrix(y_test,y_pred_random)
# create seaborn heatmap with required labels
sns.heatmap(random_forest_cm,annot = True,cmap='Reds',fmt='g',xticklabels=yoga_pose_dict.values(),yticklabels=yoga_pose_dict.values())
Out[28]:
<AxesSubplot:>
In [29]:
# Model Accuracy, how often is the classifier correct?
print("Accuracy-Random forest:",round((metrics.accuracy_score(y_test, y_pred_random))*100,2))
print("Precision-Random Forest:",round((metrics.precision_score(y_test, y_pred_random, average="macro"))*100,2))
print("Recall-Random Forest:", round((metrics.recall_score(y_test, y_pred_random, average="macro"))*100,2))
print("F1 Score -RandomForest:",round((metrics.f1_score(y_test,y_pred_random,average="macro"))*100,2))
Accuracy-Random forest: 90.78
Precision-Random Forest: 91.07
Recall-Random Forest: 89.83
F1 Score -RandomForest: 90.22
In [30]:
### XgBoost Classifier
seed = np.random.seed(22)
rng = np.random.RandomState(2)
from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold

cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)

from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from xgboost import XGBClassifier

# RandomizedSearchCV for Hyperparameter tuning
params={'alpha':[0.001,0.01,0.1],'max_depth':[1,2,3,4,5,10], 'learning_rate':[0.1,0.25,0.5]}
xgboost=RandomizedSearchCV(XGBClassifier(random_state=rng),param_distributions=params,n_iter=5,scoring='accuracy',n_jobs=-1,cv=cv,verbose=3,random_state=rng)
xgboost.fit(X_train,y_train)
Fitting 15 folds for each of 5 candidates, totalling 75 fits
Out[30]:
RandomizedSearchCV(cv=RepeatedStratifiedKFold(n_repeats=3, n_splits=5, random_state=1),
                   estimator=XGBClassifier(base_score=None, booster=None,
                                           callbacks=None,
                                           colsample_bylevel=None,
                                           colsample_bynode=None,
                                           colsample_bytree=None,
                                           early_stopping_rounds=None,
                                           enable_categorical=False,
                                           eval_metric=None, feature_types=None,
                                           gamma=None, gpu_id=None,
                                           grow_policy=None,
                                           im...
                                           min_child_weight=None, missing=nan,
                                           monotone_constraints=None,
                                           n_estimators=100, n_jobs=None,
                                           num_parallel_tree=None,
                                           predictor=None, random_state=35923, ...),
                   n_iter=5, n_jobs=-1,
                   param_distributions={'alpha': [0.001, 0.01, 0.1],
                                        'learning_rate': [0.1, 0.25, 0.5],
                                        'max_depth': [1, 2, 3, 4, 5, 10]},
                   random_state=RandomState(MT19937) at 0x1D230F34040,
                   scoring='accuracy', verbose=3)
In [31]:
print(xgboost.best_params_)
print("Accuracy is:", xgboost.score(X_test,y_test))
{'max_depth': 3, 'learning_rate': 0.25, 'alpha': 0.1}
Accuracy is: 0.9164265129682997
In [32]:
from sklearn import metrics
import seaborn as sns
y_pred_xgboost=xgboost.predict(X_test)
cm = metrics.confusion_matrix(y_test,y_pred_xgboost)
# create seaborn heatmap with required labels
sns.heatmap(cm,annot = True,cmap='Reds',fmt='g',xticklabels=yoga_pose_dict.values(),yticklabels=yoga_pose_dict.values())
Out[32]:
<AxesSubplot:>
In [33]:
print("Accuracy-xgboost:",round((metrics.accuracy_score(y_test, y_pred_xgboost))*100,2))
print("Precision-xgboost:",round((metrics.precision_score(y_test, y_pred_xgboost, average="macro"))*100,2))
print("Recall-xgboost:", round((metrics.recall_score(y_test, y_pred_xgboost, average="macro"))*100,2))
print("F1 Score -xgboost:",round((metrics.f1_score(y_test,y_pred_xgboost,average="macro"))*100,2))
Accuracy-xgboost: 91.64
Precision-xgboost: 91.59
Recall-xgboost: 90.65
F1 Score -xgboost: 91.0
In [34]:
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV
seed=np.random.seed(33)
rng=np.random.RandomState(3)
cv= RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)

param_grid={'C':[0.1,1,10,20,50,100],'kernel':['rbf','poly','sigmoid','linear'],'degree':[1,2,3,4,5,6]}
model_svm=RandomizedSearchCV(SVC(random_state =rng),param_distributions=param_grid,n_iter=5,scoring='accuracy',n_jobs=-1,cv=cv,verbose=3,random_state =rng)
model_svm.fit(X_train,y_train)
Fitting 15 folds for each of 5 candidates, totalling 75 fits
Out[34]:
RandomizedSearchCV(cv=RepeatedStratifiedKFold(n_repeats=3, n_splits=5, random_state=1),
                   estimator=SVC(random_state=RandomState(MT19937) at 0x1D22603F940),
                   n_iter=5, n_jobs=-1,
                   param_distributions={'C': [0.1, 1, 10, 20, 50, 100],
                                        'degree': [1, 2, 3, 4, 5, 6],
                                        'kernel': ['rbf', 'poly', 'sigmoid',
                                                   'linear']},
                   random_state=RandomState(MT19937) at 0x1D22603F940,
                   scoring='accuracy', verbose=3)
In [35]:
print(model_svm.best_params_)
print("Accuracy is:", model_svm.score(X_test,y_test))
{'kernel': 'poly', 'degree': 6, 'C': 1}
Accuracy is: 0.9077809798270894
In [36]:
from sklearn import metrics
import seaborn as sns
y_pred_svm=model_svm.predict(X_test)
svm_cm = metrics.confusion_matrix(y_test,y_pred_svm)
# create seaborn heatmap with required labels
sns.heatmap(svm_cm,annot = True,cmap='Reds',fmt='g',xticklabels=yoga_pose_dict.values(),yticklabels=yoga_pose_dict.values())
Out[36]:
<AxesSubplot:>
In [37]:
print(classification_report(y_test,y_pred_svm))
              precision    recall  f1-score   support

           0       0.99      0.97      0.98        88
           1       0.92      0.92      0.92        65
           2       0.83      0.83      0.83        52
           3       0.96      0.81      0.88        59
           4       0.84      0.95      0.89        83

    accuracy                           0.91       347
   macro avg       0.91      0.90      0.90       347
weighted avg       0.91      0.91      0.91       347

In [38]:
print("Accuracy-svm:",round((metrics.accuracy_score(y_test, y_pred_svm))*100,2))
print("Precision-svm:",round((metrics.precision_score(y_test, y_pred_svm, average="macro"))*100,2))
print("Recall-svm:", round((metrics.recall_score(y_test, y_pred_svm, average="macro"))*100,2))
print("F1 Score -svm:",round((metrics.f1_score(y_test,y_pred_svm,average="macro"))*100,2))
Accuracy-svm: 90.78
Precision-svm: 90.78
Recall-svm: 89.63
F1 Score -svm: 90.01
In [39]:
### Decision tree Classifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
seed = np.random.seed(44)
rng= np.random.RandomState(4)
cv= RepeatedStratifiedKFold(n_splits=5,n_repeats=3, random_state=1)
param_grid={'max_depth':[10,30,50,60,90,100],'max_features':['auto','sqrt','log2'],'min_samples_split':[2,4,6]}
model_decision= RandomizedSearchCV(DecisionTreeClassifier(random_state =rng),param_distributions=param_grid,n_iter=5,scoring='accuracy',n_jobs=-1,cv=cv,verbose=3,random_state=rng)
model_decision.fit(X_train,y_train)
Fitting 15 folds for each of 5 candidates, totalling 75 fits
Out[39]:
RandomizedSearchCV(cv=RepeatedStratifiedKFold(n_repeats=3, n_splits=5, random_state=1),
                   estimator=DecisionTreeClassifier(random_state=RandomState(MT19937) at 0x1D230F34940),
                   n_iter=5, n_jobs=-1,
                   param_distributions={'max_depth': [10, 30, 50, 60, 90, 100],
                                        'max_features': ['auto', 'sqrt',
                                                         'log2'],
                                        'min_samples_split': [2, 4, 6]},
                   random_state=RandomState(MT19937) at 0x1D230F34940,
                   scoring='accuracy', verbose=3)
In [40]:
print(model_decision.best_params_)
print("Accuracy is:", model_decision.score(X_test,y_test))
{'min_samples_split': 2, 'max_features': 'sqrt', 'max_depth': 30}
Accuracy is: 0.8645533141210374
In [41]:
from sklearn import metrics
import seaborn as sns
y_pred_decision=model_decision.predict(X_test)
decision_cm = metrics.confusion_matrix(y_test,y_pred_decision)
# create seaborn heatmap with required labels
sns.heatmap(decision_cm,annot = True,cmap='Reds',fmt='g',xticklabels=yoga_pose_dict.values(),yticklabels=yoga_pose_dict.values())
Out[41]:
<AxesSubplot:>
In [42]:
print("Accuracy-decision tree:",round((metrics.accuracy_score(y_test, y_pred_decision))*100,2))
print("Precision-decision tree:",round((metrics.precision_score(y_test, y_pred_decision, average="macro"))*100,2))
print("Recall-decision tree:", round((metrics.recall_score(y_test, y_pred_decision, average="macro"))*100,2))
print("F1 Score -decision tree:",round((metrics.f1_score(y_test,y_pred_decision,average="macro"))*100,2))
Accuracy-decision tree: 86.46
Precision-decision tree: 85.64
Recall-decision tree: 85.81
F1 Score -decision tree: 85.71
In [ ]:
 
In [43]:
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score

k_values = list(range(1, 20))
accuracy_values = []

for k in k_values:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    accuracy_values.append(accuracy)

plt.plot(k_values, accuracy_values, marker='o')
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.title('Accuracy vs. k Values')
plt.show()
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
In [44]:
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
C:\Users\jhaan\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py:228: FutureWarning: Unlike other reduction functions (e.g. `skew`, `kurtosis`), the default behavior of `mode` typically preserves the axis it acts along. In SciPy 1.11.0, this behavior will change: the default value of `keepdims` will become False, the `axis` over which the statistic is taken will be eliminated, and the value None will no longer be accepted. Set `keepdims` to True or False to avoid this warning.
  mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
In [45]:
print("Accuracy-knn:",round((metrics.accuracy_score(y_test, y_pred))*100,2))
Accuracy-knn: 89.34

saving the model¶

In [46]:
import pickle
file_name='xgb-reg_fix2.pkl'
pickle.dump(xgboost,open(file_name,"wb"))
xgb_model_loaded=pickle.load(open("xgb-reg_fix.pkl","rb"))
In [47]:
xgb_model_loaded=pickle.load(open("xgb-reg_fix.pkl","rb"))

Yoga Pose detection from Image¶

In [48]:
# read and display the input Image
import matplotlib.pyplot as plt
In [49]:
def poseclassify(path):
    sample_img=cv2.imread(path)
    plt.figure(figsize=[10,10])
    plt.title("Sample_image")
    plt.axis("off")
    plt.imshow(sample_img)
    plt.imshow(sample_img[:,:,::-1])
    plt.show()
    
    # Define pose estimation and skeltal image drawing object
    mp_drawing = mp.solutions.drawing_utils
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose(static_image_mode=True,model_complexity=2)
    img_copy = sample_img.copy()
    #Convert BGR to RGB format and apply the Blazepose pose estimation model
    results = pose.process(cv2.cvtColor(sample_img,cv2.COLOR_BGR2RGB))
    
    
    # to draw it in 3D Space
    mp_drawing.plot_landmarks(results.pose_world_landmarks,mp_pose.POSE_CONNECTIONS)
    
    # Extract the landmark data and pass to xgboost model for prediction
    landmarks = results.pose_landmarks.landmark

    # Flatten Array
    pose_row = list(np.array([[landmark.x, landmark.y,landmark.z] for landmark in landmarks]).flatten())
    X = pd.DataFrame([pose_row])

    # Xgboost prediction
    X.columns = A
    body_language_class = xgb_model_loaded.predict(X)[0]
    body_language_prob = xgb_model_loaded.predict_proba(X)[0]
    print(body_language_class, body_language_prob)

    pose_detected= yoga_pose_dict[body_language_class]
    prob=(round(body_language_prob[np.argmax(body_language_prob)],2))
    print(" The pose detected is:", pose_detected)
    print("probablity :", prob)

    if results.pose_landmarks:
        mp_drawing.draw_landmarks(img_copy,results.pose_landmarks,mp_pose.POSE_CONNECTIONS,
                                 mp_drawing.DrawingSpec(color=((255,127,80)), thickness=1,circle_radius=2),
                                 mp_drawing.DrawingSpec(color=(50,205,50),thickness =1,circle_radius=2)
                                 )
        fig=plt.figure(figsize=[10,10])
        plt.title("output-image")
        plt.axis("off")
        plt.imshow(img_copy[:,:,::-1])
        plt.show()

    
In [50]:
poseclassify("C:\\Users\\jhaan\\Desktop\\Anand_01.jpg")
3 [4.1104291e-04 2.7608846e-03 5.8537070e-02 9.3333369e-01 4.9572806e-03]
 The pose detected is: VIRABHADRASANA
probablity : 0.93
In [51]:
poseclassify("C:\\Users\\jhaan\\Desktop\\Anand_02.jpg")
4 [5.4400192e-05 1.2273241e-04 3.3141478e-04 1.3932917e-04 9.9935216e-01]
 The pose detected is: VRIKSHASANA
probablity : 1.0
In [52]:
poseclassify("C:\\Users\\jhaan\\Desktop\\Anand_03.jpg")
2 [5.6868169e-04 7.0563750e-04 9.9182796e-01 1.5963842e-03 5.3013591e-03]
 The pose detected is: UTKATA KONASANA
probablity : 0.99
In [53]:
poseclassify("C:\\Users\\jhaan\\Desktop\\Anand_04.jpg")
1 [0.10909098 0.83369267 0.00393354 0.04577509 0.00750775]
 The pose detected is: BALASANA
probablity : 0.83
In [54]:
poseclassify("C:\\Users\\jhaan\\Desktop\\Anand_05.jpg")
0 [0.7829191  0.20188373 0.00104496 0.01267623 0.00147592]
 The pose detected is: Adho Mukha Svanasana
probablity : 0.78
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 

Pose Detection from Real-Time Video¶

In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [55]:
def yogabenifit(yoga_pose):
    if yoga_pose == "VIRABHADRASANA":
        cv2.putText(image,"It strengthen the arms, shoulders, and legs.", (20, frame.shape[0] - 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It maintain balance in the body.", (20, frame.shape[0] - 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It increase stamina, muscle endurance and relieve tension", (20, frame.shape[0] - 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
       
    if yoga_pose == "Adho Mukha Svanasana":
        cv2.putText(image,"It may help improve digestion.", (20, frame.shape[0] - 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It might help stimulate circulation.", (20, frame.shape[0] - 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It may help to relieve leg pain and ankle pain.", (20, frame.shape[0] - 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
                   
    if yoga_pose == "UTKATA KONASANA":
        cv2.putText(image," It strengthens the pelvic floor, thighs, knees, and ankles.", (20, frame.shape[0] - 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It strengthens the spine and activates the Muladhara, Swadhisthan, and Manipur chakra. ", (20, frame.shape[0] - 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"It also activates the heart chakra and promotes confidence and compassion.", (20, frame.shape[0] - 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
                                 
                    
    if yoga_pose == "BALASANA":
        cv2.putText(image,"Enhances blood circulation", (20, frame.shape[0] - 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"Strengthens the ligaments in the knees", (20, frame.shape[0] - 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"Effectively calms the mind", (20, frame.shape[0] - 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
                                 
                                 
                  
    if yoga_pose == "VRIKSHASANA":
        cv2.putText(image,"Boosts the balance of the body", (20, frame.shape[0] - 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"Improves the posture.", (20, frame.shape[0] - 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
        cv2.putText(image,"Tones the muscles of your legs.", (20, frame.shape[0] - 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),1,cv2.LINE_AA)
                                 
                                 
 
                    
In [56]:
#pip install pyttsx3
import pyttsx3

# Initialize the text-to-speech engine
engine = pyttsx3.init()
def text2speech(yoga_pose):
    
    engine.say(f"it is {yoga_pose} pose ")
    engine.runAndWait()
In [57]:
cap = cv2.VideoCapture(0)

with mp_pose.Pose(min_detection_confidence=0.9, min_tracking_confidence=0.9) as pose:
    while cap.isOpened():
        
        ret, frame =cap.read()
        
        image= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image.flags.writeable = False
        
        results = pose.process(image)
        
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                  mp_drawing.DrawingSpec(color=(255,127,80), thickness=2, circle_radius=2),
                                  mp_drawing.DrawingSpec(color=(50,205,50), thickness=2, circle_radius=2))
        
        
        
        try:
            if results.pose_landmarks is not None:
                
                landmarks = results.pose_landmarks.landmark
                pose_row=list(np.array([[landmark.x,landmark.y,landmark.x] for landmark in landmarks]).flatten())
                X= pd.DataFrame([pose_row])
                X.columns = A
                body_language_class = xgb_model_loaded.predict(X)[0]
                body_language_prob =xgb_model_loaded.predict_proba(X)[0]

                yoga_pose= yoga_pose_dict[body_language_class]
                #print("pose detected:", yoga_pose)
                prob=(round(body_language_prob[np.argmax(body_language_prob)],2))

                if prob>=0.85:
                    grade="Very Good"

                if prob<0.85 and prob>=0.80:
                    grade="Good"

                if prob<0.80:
                    grade="Needs Improvement"

                cv2.rectangle(image,(0,0), (1080,60), (0,0,16), -1)

                if prob>=0.75 and len(landmarks) >= 25:
                    cv2.putText(image,"Yoga Pose Detected",(150,12), cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255), 1,cv2.LINE_AA)
                    cv2.putText(image, yoga_pose,(150,35),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255), 1, cv2.LINE_AA)
                    
                    if prob>=0.80:
                        
                        yogabenifit(yoga_pose)
                    

                    cv2.putText(image,"Probability", (30,12),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)
                    cv2.putText(image,str(round(body_language_prob[np.argmax(body_language_prob)],2)), (30,35),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

                    cv2.putText(image,"System_comment", (400,12),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

                    cv2.putText(image,grade, (400,35),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,cv2.LINE_AA)

                else:

                    cv2.putText(image,"No pose detected", (180,30),cv2.FONT_HERSHEY_SIMPLEX, 0.8,(255,255,255),1,cv2.LINE_AA)
                    cv2.putText(image,"please let full body come into the frame", (100,50),cv2.FONT_HERSHEY_SIMPLEX, 0.8,(255,255,255),1,cv2.LINE_AA)


                if prob >= 0.75:
                    if not in_pose:
                        in_pose = True
                        pose_start_time = cap.get(cv2.CAP_PROP_POS_MSEC)
                        # Speak when the pose is initially detected
                        
                        
                    else:
                        pose_end_time = cap.get(cv2.CAP_PROP_POS_MSEC)
                        total_time = (pose_end_time - pose_start_time) / 1000  # Convert to seconds
                        
                       
                        # Display the result
                        cv2.putText(image, f"Total time in {yoga_pose}: {total_time} seconds",
                                    (150, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 1, cv2.LINE_AA)
                        
                        
                        
                        
                    if not in_pose:
                        in_pose = True
                        pose_start_time = cap.get(cv2.CAP_PROP_POS_MSEC)
                       
                    else:
                        pose_end_time = cap.get(cv2.CAP_PROP_POS_MSEC)
                        total_time = (pose_end_time - pose_start_time) / 1000 
                        
                        
                        if total_time >= 2:  # Example: Speak after 5 seconds in the pose
                            # Speak after certain time spent in the pose
                            text2speech(yoga_pose)
                            
                       


                else:
                    in_pose = False
            
            #else:
                
                #print("")

        except Exception as e:
            print(e)
        
        
        
        
        cv2.namedWindow("Resized_pose_detection_window", cv2.WINDOW_NORMAL)
        
        cv2.resizeWindow("Resized_pose_detection_window",2000, 1000)
                
        cv2.imshow("Resized_pose_detection_window", image) 
        
        
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

            
            
            
cap.release()
cv2.destroyAllWindows()
            
        
In [ ]:

In [ ]:
 
In [ ]:
 
In [ ]:

In [ ]:
 
In [ ]: